applied-ai-018 committed on
Commit a6860c3 · verified · 1 Parent(s): fa55437

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__init__.py +403 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/__init__.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/auto_factory.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/configuration_auto.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/feature_extraction_auto.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/image_processing_auto.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_auto.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_flax_auto.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_tf_auto.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/processing_auto.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/tokenization_auto.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py +806 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py +984 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py +396 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/image_processing_auto.py +437 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py +1705 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py +382 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py +721 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py +358 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py +936 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__init__.py +59 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py +195 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__init__.py +79 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/configuration_imagegpt.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/convert_imagegpt_original_tf2_to_pytorch.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/feature_extraction_imagegpt.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/configuration_imagegpt.py +199 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py +72 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py +33 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py +314 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/modeling_imagegpt.py +1200 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__init__.py +60 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/__init__.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/configuration_informer.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/modeling_informer.cpython-310.pyc +0 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/models/informer/configuration_informer.py +249 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/models/informer/modeling_informer.py +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__init__.py +84 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py +174 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py +215 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__init__.py ADDED
@@ -0,0 +1,403 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import (
18
+ OptionalDependencyNotAvailable,
19
+ _LazyModule,
20
+ is_flax_available,
21
+ is_tf_available,
22
+ is_torch_available,
23
+ )
24
+
25
+
26
+ _import_structure = {
27
+ "auto_factory": ["get_values"],
28
+ "configuration_auto": ["ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "AutoConfig"],
29
+ "feature_extraction_auto": ["FEATURE_EXTRACTOR_MAPPING", "AutoFeatureExtractor"],
30
+ "image_processing_auto": ["IMAGE_PROCESSOR_MAPPING", "AutoImageProcessor"],
31
+ "processing_auto": ["PROCESSOR_MAPPING", "AutoProcessor"],
32
+ "tokenization_auto": ["TOKENIZER_MAPPING", "AutoTokenizer"],
33
+ }
34
+
35
+ try:
36
+ if not is_torch_available():
37
+ raise OptionalDependencyNotAvailable()
38
+ except OptionalDependencyNotAvailable:
39
+ pass
40
+ else:
41
+ _import_structure["modeling_auto"] = [
42
+ "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
43
+ "MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING",
44
+ "MODEL_FOR_AUDIO_XVECTOR_MAPPING",
45
+ "MODEL_FOR_BACKBONE_MAPPING",
46
+ "MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING",
47
+ "MODEL_FOR_CAUSAL_LM_MAPPING",
48
+ "MODEL_FOR_CTC_MAPPING",
49
+ "MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
50
+ "MODEL_FOR_DEPTH_ESTIMATION_MAPPING",
51
+ "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
52
+ "MODEL_FOR_IMAGE_MAPPING",
53
+ "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING",
54
+ "MODEL_FOR_IMAGE_TO_IMAGE_MAPPING",
55
+ "MODEL_FOR_KEYPOINT_DETECTION_MAPPING",
56
+ "MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING",
57
+ "MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
58
+ "MODEL_FOR_MASKED_LM_MAPPING",
59
+ "MODEL_FOR_MASK_GENERATION_MAPPING",
60
+ "MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
61
+ "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
62
+ "MODEL_FOR_OBJECT_DETECTION_MAPPING",
63
+ "MODEL_FOR_PRETRAINING_MAPPING",
64
+ "MODEL_FOR_QUESTION_ANSWERING_MAPPING",
65
+ "MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
66
+ "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
67
+ "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
68
+ "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
69
+ "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
70
+ "MODEL_FOR_TEXT_ENCODING_MAPPING",
71
+ "MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING",
72
+ "MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING",
73
+ "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
74
+ "MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING",
75
+ "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING",
76
+ "MODEL_FOR_VISION_2_SEQ_MAPPING",
77
+ "MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING",
78
+ "MODEL_MAPPING",
79
+ "MODEL_WITH_LM_HEAD_MAPPING",
80
+ "MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
81
+ "MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING",
82
+ "MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING",
83
+ "MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING",
84
+ "AutoModel",
85
+ "AutoBackbone",
86
+ "AutoModelForAudioClassification",
87
+ "AutoModelForAudioFrameClassification",
88
+ "AutoModelForAudioXVector",
89
+ "AutoModelForCausalLM",
90
+ "AutoModelForCTC",
91
+ "AutoModelForDepthEstimation",
92
+ "AutoModelForImageClassification",
93
+ "AutoModelForImageSegmentation",
94
+ "AutoModelForImageToImage",
95
+ "AutoModelForInstanceSegmentation",
96
+ "AutoModelForKeypointDetection",
97
+ "AutoModelForMaskGeneration",
98
+ "AutoModelForTextEncoding",
99
+ "AutoModelForMaskedImageModeling",
100
+ "AutoModelForMaskedLM",
101
+ "AutoModelForMultipleChoice",
102
+ "AutoModelForNextSentencePrediction",
103
+ "AutoModelForObjectDetection",
104
+ "AutoModelForPreTraining",
105
+ "AutoModelForQuestionAnswering",
106
+ "AutoModelForSemanticSegmentation",
107
+ "AutoModelForSeq2SeqLM",
108
+ "AutoModelForSequenceClassification",
109
+ "AutoModelForSpeechSeq2Seq",
110
+ "AutoModelForTableQuestionAnswering",
111
+ "AutoModelForTextToSpectrogram",
112
+ "AutoModelForTextToWaveform",
113
+ "AutoModelForTokenClassification",
114
+ "AutoModelForUniversalSegmentation",
115
+ "AutoModelForVideoClassification",
116
+ "AutoModelForVision2Seq",
117
+ "AutoModelForVisualQuestionAnswering",
118
+ "AutoModelForDocumentQuestionAnswering",
119
+ "AutoModelWithLMHead",
120
+ "AutoModelForZeroShotImageClassification",
121
+ "AutoModelForZeroShotObjectDetection",
122
+ ]
123
+
124
+ try:
125
+ if not is_tf_available():
126
+ raise OptionalDependencyNotAvailable()
127
+ except OptionalDependencyNotAvailable:
128
+ pass
129
+ else:
130
+ _import_structure["modeling_tf_auto"] = [
131
+ "TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
132
+ "TF_MODEL_FOR_CAUSAL_LM_MAPPING",
133
+ "TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
134
+ "TF_MODEL_FOR_MASK_GENERATION_MAPPING",
135
+ "TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING",
136
+ "TF_MODEL_FOR_MASKED_LM_MAPPING",
137
+ "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
138
+ "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
139
+ "TF_MODEL_FOR_PRETRAINING_MAPPING",
140
+ "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
141
+ "TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING",
142
+ "TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING",
143
+ "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
144
+ "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
145
+ "TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
146
+ "TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING",
147
+ "TF_MODEL_FOR_TEXT_ENCODING_MAPPING",
148
+ "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
149
+ "TF_MODEL_FOR_VISION_2_SEQ_MAPPING",
150
+ "TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING",
151
+ "TF_MODEL_MAPPING",
152
+ "TF_MODEL_WITH_LM_HEAD_MAPPING",
153
+ "TFAutoModel",
154
+ "TFAutoModelForAudioClassification",
155
+ "TFAutoModelForCausalLM",
156
+ "TFAutoModelForImageClassification",
157
+ "TFAutoModelForMaskedImageModeling",
158
+ "TFAutoModelForMaskedLM",
159
+ "TFAutoModelForMaskGeneration",
160
+ "TFAutoModelForMultipleChoice",
161
+ "TFAutoModelForNextSentencePrediction",
162
+ "TFAutoModelForPreTraining",
163
+ "TFAutoModelForDocumentQuestionAnswering",
164
+ "TFAutoModelForQuestionAnswering",
165
+ "TFAutoModelForSemanticSegmentation",
166
+ "TFAutoModelForSeq2SeqLM",
167
+ "TFAutoModelForSequenceClassification",
168
+ "TFAutoModelForSpeechSeq2Seq",
169
+ "TFAutoModelForTableQuestionAnswering",
170
+ "TFAutoModelForTextEncoding",
171
+ "TFAutoModelForTokenClassification",
172
+ "TFAutoModelForVision2Seq",
173
+ "TFAutoModelForZeroShotImageClassification",
174
+ "TFAutoModelWithLMHead",
175
+ ]
176
+
177
+ try:
178
+ if not is_flax_available():
179
+ raise OptionalDependencyNotAvailable()
180
+ except OptionalDependencyNotAvailable:
181
+ pass
182
+ else:
183
+ _import_structure["modeling_flax_auto"] = [
184
+ "FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING",
185
+ "FLAX_MODEL_FOR_CAUSAL_LM_MAPPING",
186
+ "FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING",
187
+ "FLAX_MODEL_FOR_MASKED_LM_MAPPING",
188
+ "FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING",
189
+ "FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING",
190
+ "FLAX_MODEL_FOR_PRETRAINING_MAPPING",
191
+ "FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING",
192
+ "FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING",
193
+ "FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING",
194
+ "FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING",
195
+ "FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING",
196
+ "FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING",
197
+ "FLAX_MODEL_MAPPING",
198
+ "FlaxAutoModel",
199
+ "FlaxAutoModelForCausalLM",
200
+ "FlaxAutoModelForImageClassification",
201
+ "FlaxAutoModelForMaskedLM",
202
+ "FlaxAutoModelForMultipleChoice",
203
+ "FlaxAutoModelForNextSentencePrediction",
204
+ "FlaxAutoModelForPreTraining",
205
+ "FlaxAutoModelForQuestionAnswering",
206
+ "FlaxAutoModelForSeq2SeqLM",
207
+ "FlaxAutoModelForSequenceClassification",
208
+ "FlaxAutoModelForSpeechSeq2Seq",
209
+ "FlaxAutoModelForTokenClassification",
210
+ "FlaxAutoModelForVision2Seq",
211
+ ]
212
+
213
+
214
+ if TYPE_CHECKING:
215
+ from .auto_factory import get_values
216
+ from .configuration_auto import ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, AutoConfig
217
+ from .feature_extraction_auto import FEATURE_EXTRACTOR_MAPPING, AutoFeatureExtractor
218
+ from .image_processing_auto import IMAGE_PROCESSOR_MAPPING, AutoImageProcessor
219
+ from .processing_auto import PROCESSOR_MAPPING, AutoProcessor
220
+ from .tokenization_auto import TOKENIZER_MAPPING, AutoTokenizer
221
+
222
+ try:
223
+ if not is_torch_available():
224
+ raise OptionalDependencyNotAvailable()
225
+ except OptionalDependencyNotAvailable:
226
+ pass
227
+ else:
228
+ from .modeling_auto import (
229
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
230
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING,
231
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING,
232
+ MODEL_FOR_BACKBONE_MAPPING,
233
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING,
234
+ MODEL_FOR_CAUSAL_LM_MAPPING,
235
+ MODEL_FOR_CTC_MAPPING,
236
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING,
237
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
238
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
239
+ MODEL_FOR_IMAGE_MAPPING,
240
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING,
241
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING,
242
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING,
243
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING,
244
+ MODEL_FOR_MASK_GENERATION_MAPPING,
245
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
246
+ MODEL_FOR_MASKED_LM_MAPPING,
247
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
248
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
249
+ MODEL_FOR_OBJECT_DETECTION_MAPPING,
250
+ MODEL_FOR_PRETRAINING_MAPPING,
251
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING,
252
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
253
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
254
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
255
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
256
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
257
+ MODEL_FOR_TEXT_ENCODING_MAPPING,
258
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING,
259
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING,
260
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING,
261
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING,
262
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
263
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING,
264
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
265
+ MODEL_FOR_VISION_2_SEQ_MAPPING,
266
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING,
267
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
268
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING,
269
+ MODEL_MAPPING,
270
+ MODEL_WITH_LM_HEAD_MAPPING,
271
+ AutoBackbone,
272
+ AutoModel,
273
+ AutoModelForAudioClassification,
274
+ AutoModelForAudioFrameClassification,
275
+ AutoModelForAudioXVector,
276
+ AutoModelForCausalLM,
277
+ AutoModelForCTC,
278
+ AutoModelForDepthEstimation,
279
+ AutoModelForDocumentQuestionAnswering,
280
+ AutoModelForImageClassification,
281
+ AutoModelForImageSegmentation,
282
+ AutoModelForImageToImage,
283
+ AutoModelForInstanceSegmentation,
284
+ AutoModelForKeypointDetection,
285
+ AutoModelForMaskedImageModeling,
286
+ AutoModelForMaskedLM,
287
+ AutoModelForMaskGeneration,
288
+ AutoModelForMultipleChoice,
289
+ AutoModelForNextSentencePrediction,
290
+ AutoModelForObjectDetection,
291
+ AutoModelForPreTraining,
292
+ AutoModelForQuestionAnswering,
293
+ AutoModelForSemanticSegmentation,
294
+ AutoModelForSeq2SeqLM,
295
+ AutoModelForSequenceClassification,
296
+ AutoModelForSpeechSeq2Seq,
297
+ AutoModelForTableQuestionAnswering,
298
+ AutoModelForTextEncoding,
299
+ AutoModelForTextToSpectrogram,
300
+ AutoModelForTextToWaveform,
301
+ AutoModelForTokenClassification,
302
+ AutoModelForUniversalSegmentation,
303
+ AutoModelForVideoClassification,
304
+ AutoModelForVision2Seq,
305
+ AutoModelForVisualQuestionAnswering,
306
+ AutoModelForZeroShotImageClassification,
307
+ AutoModelForZeroShotObjectDetection,
308
+ AutoModelWithLMHead,
309
+ )
310
+
311
+ try:
312
+ if not is_tf_available():
313
+ raise OptionalDependencyNotAvailable()
314
+ except OptionalDependencyNotAvailable:
315
+ pass
316
+ else:
317
+ from .modeling_tf_auto import (
318
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
319
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING,
320
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING,
321
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
322
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING,
323
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
324
+ TF_MODEL_FOR_MASKED_LM_MAPPING,
325
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
326
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
327
+ TF_MODEL_FOR_PRETRAINING_MAPPING,
328
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
329
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING,
330
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
331
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
332
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
333
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
334
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING,
335
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
336
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING,
337
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING,
338
+ TF_MODEL_MAPPING,
339
+ TF_MODEL_WITH_LM_HEAD_MAPPING,
340
+ TFAutoModel,
341
+ TFAutoModelForAudioClassification,
342
+ TFAutoModelForCausalLM,
343
+ TFAutoModelForDocumentQuestionAnswering,
344
+ TFAutoModelForImageClassification,
345
+ TFAutoModelForMaskedImageModeling,
346
+ TFAutoModelForMaskedLM,
347
+ TFAutoModelForMaskGeneration,
348
+ TFAutoModelForMultipleChoice,
349
+ TFAutoModelForNextSentencePrediction,
350
+ TFAutoModelForPreTraining,
351
+ TFAutoModelForQuestionAnswering,
352
+ TFAutoModelForSemanticSegmentation,
353
+ TFAutoModelForSeq2SeqLM,
354
+ TFAutoModelForSequenceClassification,
355
+ TFAutoModelForSpeechSeq2Seq,
356
+ TFAutoModelForTableQuestionAnswering,
357
+ TFAutoModelForTextEncoding,
358
+ TFAutoModelForTokenClassification,
359
+ TFAutoModelForVision2Seq,
360
+ TFAutoModelForZeroShotImageClassification,
361
+ TFAutoModelWithLMHead,
362
+ )
363
+
364
+ try:
365
+ if not is_flax_available():
366
+ raise OptionalDependencyNotAvailable()
367
+ except OptionalDependencyNotAvailable:
368
+ pass
369
+ else:
370
+ from .modeling_flax_auto import (
371
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING,
372
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
373
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
374
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING,
375
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
376
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
377
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING,
378
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
379
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
380
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
381
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING,
382
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
383
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
384
+ FLAX_MODEL_MAPPING,
385
+ FlaxAutoModel,
386
+ FlaxAutoModelForCausalLM,
387
+ FlaxAutoModelForImageClassification,
388
+ FlaxAutoModelForMaskedLM,
389
+ FlaxAutoModelForMultipleChoice,
390
+ FlaxAutoModelForNextSentencePrediction,
391
+ FlaxAutoModelForPreTraining,
392
+ FlaxAutoModelForQuestionAnswering,
393
+ FlaxAutoModelForSeq2SeqLM,
394
+ FlaxAutoModelForSequenceClassification,
395
+ FlaxAutoModelForSpeechSeq2Seq,
396
+ FlaxAutoModelForTokenClassification,
397
+ FlaxAutoModelForVision2Seq,
398
+ )
399
+
400
+ else:
401
+ import sys
402
+
403
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
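The `__init__.py` above is the standard Transformers lazy-import setup: under `TYPE_CHECKING` the names are imported directly so static analysis sees them, while at runtime the module object is swapped for a `_LazyModule` that resolves each name from `_import_structure` on first attribute access, and the `is_torch_available()` / `is_tf_available()` / `is_flax_available()` guards keep backend-specific classes out of the structure when a framework is not installed. A minimal sketch of how this behaves from the consumer side, assuming a working `transformers` install with PyTorch; the checkpoint id is only an example and is not something shipped in this commit:

```python
# Sketch only: exercises the lazy auto module added above.
# Assumes `transformers` and `torch` are installed and network access is available.
import transformers.models.auto as auto

# The module object is the _LazyModule proxy installed by the `else:` branch above.
print(type(auto).__name__)  # expected: "_LazyModule"

# Attribute access triggers the real import of the corresponding submodule.
config = auto.AutoConfig.from_pretrained("bert-base-uncased")  # example checkpoint
print(config.model_type)  # "bert"

# AutoModel is only exposed because is_torch_available() was True at import time;
# from_config builds the architecture with random weights, no weight download.
model = auto.AutoModel.from_config(config)
print(type(model).__name__)  # "BertModel"
```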
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (8.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/auto_factory.cpython-310.pyc ADDED
Binary file (30.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/configuration_auto.cpython-310.pyc ADDED
Binary file (28.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/feature_extraction_auto.cpython-310.pyc ADDED
Binary file (15.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/image_processing_auto.cpython-310.pyc ADDED
Binary file (16.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_auto.cpython-310.pyc ADDED
Binary file (40.9 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_flax_auto.cpython-310.pyc ADDED
Binary file (9.41 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/modeling_tf_auto.cpython-310.pyc ADDED
Binary file (17.5 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/processing_auto.cpython-310.pyc ADDED
Binary file (11.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/__pycache__/tokenization_auto.cpython-310.pyc ADDED
Binary file (28 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/auto_factory.py ADDED
@@ -0,0 +1,806 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Factory function to build auto-model classes."""
16
+ import copy
17
+ import importlib
18
+ import json
19
+ import os
20
+ import warnings
21
+ from collections import OrderedDict
22
+
23
+ from ...configuration_utils import PretrainedConfig
24
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
25
+ from ...utils import (
26
+ CONFIG_NAME,
27
+ cached_file,
28
+ copy_func,
29
+ extract_commit_hash,
30
+ find_adapter_config_file,
31
+ is_peft_available,
32
+ logging,
33
+ requires_backends,
34
+ )
35
+ from .configuration_auto import AutoConfig, model_type_to_module_name, replace_list_option_in_docstrings
36
+
37
+
38
+ logger = logging.get_logger(__name__)
39
+
40
+
41
+ CLASS_DOCSTRING = """
42
+ This is a generic model class that will be instantiated as one of the model classes of the library when created
43
+ with the [`~BaseAutoModelClass.from_pretrained`] class method or the [`~BaseAutoModelClass.from_config`] class
44
+ method.
45
+
46
+ This class cannot be instantiated directly using `__init__()` (throws an error).
47
+ """
48
+
49
+ FROM_CONFIG_DOCSTRING = """
50
+ Instantiates one of the model classes of the library from a configuration.
51
+
52
+ Note:
53
+ Loading a model from its configuration file does **not** load the model weights. It only affects the
54
+ model's configuration. Use [`~BaseAutoModelClass.from_pretrained`] to load the model weights.
55
+
56
+ Args:
57
+ config ([`PretrainedConfig`]):
58
+ The model class to instantiate is selected based on the configuration class:
59
+
60
+ List options
61
+ attn_implementation (`str`, *optional*):
62
+ The attention implementation to use in the model (if relevant). Can be any of `"eager"` (manual implementation of the attention), `"sdpa"` (using [`F.scaled_dot_product_attention`](https://pytorch.org/docs/master/generated/torch.nn.functional.scaled_dot_product_attention.html)), or `"flash_attention_2"` (using [Dao-AILab/flash-attention](https://github.com/Dao-AILab/flash-attention)). By default, if available, SDPA will be used for torch>=2.1.1. The default is otherwise the manual `"eager"` implementation.
63
+
64
+ Examples:
65
+
66
+ ```python
67
+ >>> from transformers import AutoConfig, BaseAutoModelClass
68
+
69
+ >>> # Download configuration from huggingface.co and cache.
70
+ >>> config = AutoConfig.from_pretrained("checkpoint_placeholder")
71
+ >>> model = BaseAutoModelClass.from_config(config)
72
+ ```
73
+ """
74
+
75
+ FROM_PRETRAINED_TORCH_DOCSTRING = """
76
+ Instantiate one of the model classes of the library from a pretrained model.
77
+
78
+ The model class to instantiate is selected based on the `model_type` property of the config object (either
79
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
80
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
81
+
82
+ List options
83
+
84
+ The model is set in evaluation mode by default using `model.eval()` (so for instance, dropout modules are
85
+ deactivated). To train the model, you should first set it back in training mode with `model.train()`
86
+
87
+ Args:
88
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
89
+ Can be either:
90
+
91
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
92
+ - A path to a *directory* containing model weights saved using
93
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
94
+ - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
95
+ this case, `from_tf` should be set to `True` and a configuration object should be provided as
96
+ `config` argument. This loading path is slower than converting the TensorFlow checkpoint to a
97
+ PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
98
+ model_args (additional positional arguments, *optional*):
99
+ Will be passed along to the underlying model `__init__()` method.
100
+ config ([`PretrainedConfig`], *optional*):
101
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
102
+ be automatically loaded when:
103
+
104
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
105
+ model).
106
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
107
+ save directory.
108
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
109
+ configuration JSON file named *config.json* is found in the directory.
110
+ state_dict (*Dict[str, torch.Tensor]*, *optional*):
111
+ A state dictionary to use instead of a state dictionary loaded from saved weights file.
112
+
113
+ This option can be used if you want to create a model from a pretrained configuration but load your own
114
+ weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
115
+ [`~PreTrainedModel.from_pretrained`] is not a simpler option.
116
+ cache_dir (`str` or `os.PathLike`, *optional*):
117
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
118
+ standard cache should not be used.
119
+ from_tf (`bool`, *optional*, defaults to `False`):
120
+ Load the model weights from a TensorFlow checkpoint save file (see docstring of
121
+ `pretrained_model_name_or_path` argument).
122
+ force_download (`bool`, *optional*, defaults to `False`):
123
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
124
+ cached versions if they exist.
125
+ resume_download (`bool`, *optional*, defaults to `False`):
126
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
127
+ file exists.
128
+ proxies (`Dict[str, str]`, *optional*):
129
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
130
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
131
+ output_loading_info(`bool`, *optional*, defaults to `False`):
132
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
133
+ local_files_only(`bool`, *optional*, defaults to `False`):
134
+ Whether or not to only look at local files (e.g., not try downloading the model).
135
+ revision (`str`, *optional*, defaults to `"main"`):
136
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
137
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
138
+ identifier allowed by git.
139
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
140
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
141
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
142
+ execute code present on the Hub on your local machine.
143
+ code_revision (`str`, *optional*, defaults to `"main"`):
144
+ The specific revision to use for the code on the Hub, if the code lives in a different repository than
145
+ the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
146
+ system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
147
+ allowed by git.
148
+ kwargs (additional keyword arguments, *optional*):
149
+ Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g.,
150
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
151
+ automatically loaded:
152
+
153
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
154
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
155
+ already been done)
156
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
157
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
158
+ corresponds to a configuration attribute will be used to override said attribute with the
159
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
160
+ will be passed to the underlying model's `__init__` function.
161
+
162
+ Examples:
163
+
164
+ ```python
165
+ >>> from transformers import AutoConfig, BaseAutoModelClass
166
+
167
+ >>> # Download model and configuration from huggingface.co and cache.
168
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
169
+
170
+ >>> # Update configuration during loading
171
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
172
+ >>> model.config.output_attentions
173
+ True
174
+
175
+ >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower)
176
+ >>> config = AutoConfig.from_pretrained("./tf_model/shortcut_placeholder_tf_model_config.json")
177
+ >>> model = BaseAutoModelClass.from_pretrained(
178
+ ... "./tf_model/shortcut_placeholder_tf_checkpoint.ckpt.index", from_tf=True, config=config
179
+ ... )
180
+ ```
181
+ """
182
+
183
+ FROM_PRETRAINED_TF_DOCSTRING = """
184
+ Instantiate one of the model classes of the library from a pretrained model.
185
+
186
+ The model class to instantiate is selected based on the `model_type` property of the config object (either
187
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
188
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
189
+
190
+ List options
191
+
192
+ Args:
193
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
194
+ Can be either:
195
+
196
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
197
+ - A path to a *directory* containing model weights saved using
198
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
199
+ - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
200
+ case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
201
+ argument. This loading path is slower than converting the PyTorch model to a TensorFlow model
202
+ using the provided conversion scripts and loading the TensorFlow model afterwards.
203
+ model_args (additional positional arguments, *optional*):
204
+ Will be passed along to the underlying model `__init__()` method.
205
+ config ([`PretrainedConfig`], *optional*):
206
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
207
+ be automatically loaded when:
208
+
209
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
210
+ model).
211
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
212
+ save directory.
213
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
214
+ configuration JSON file named *config.json* is found in the directory.
215
+ cache_dir (`str` or `os.PathLike`, *optional*):
216
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
217
+ standard cache should not be used.
218
+ from_pt (`bool`, *optional*, defaults to `False`):
219
+ Load the model weights from a PyTorch checkpoint save file (see docstring of
220
+ `pretrained_model_name_or_path` argument).
221
+ force_download (`bool`, *optional*, defaults to `False`):
222
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
223
+ cached versions if they exist.
224
+ resume_download (`bool`, *optional*, defaults to `False`):
225
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
226
+ file exists.
227
+ proxies (`Dict[str, str]`, *optional*):
228
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
229
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
230
+ output_loading_info(`bool`, *optional*, defaults to `False`):
231
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
232
+ local_files_only(`bool`, *optional*, defaults to `False`):
233
+ Whether or not to only look at local files (e.g., not try downloading the model).
234
+ revision (`str`, *optional*, defaults to `"main"`):
235
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
236
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
237
+ identifier allowed by git.
238
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
239
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
240
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
241
+ execute code present on the Hub on your local machine.
242
+ code_revision (`str`, *optional*, defaults to `"main"`):
243
+ The specific revision to use for the code on the Hub, if the code lives in a different repository than
244
+ the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
245
+ system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
246
+ allowed by git.
247
+ kwargs (additional keyword arguments, *optional*):
248
+ Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g.,
249
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
250
+ automatically loaded:
251
+
252
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
253
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
254
+ already been done)
255
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
256
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
257
+ corresponds to a configuration attribute will be used to override said attribute with the
258
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
259
+ will be passed to the underlying model's `__init__` function.
260
+
261
+ Examples:
262
+
263
+ ```python
264
+ >>> from transformers import AutoConfig, BaseAutoModelClass
265
+
266
+ >>> # Download model and configuration from huggingface.co and cache.
267
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
268
+
269
+ >>> # Update configuration during loading
270
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
271
+ >>> model.config.output_attentions
272
+ True
273
+
274
+ >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
275
+ >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
276
+ >>> model = BaseAutoModelClass.from_pretrained(
277
+ ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
278
+ ... )
279
+ ```
280
+ """
281
+
282
+ FROM_PRETRAINED_FLAX_DOCSTRING = """
283
+ Instantiate one of the model classes of the library from a pretrained model.
284
+
285
+ The model class to instantiate is selected based on the `model_type` property of the config object (either
286
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
287
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
288
+
289
+ List options
290
+
291
+ Args:
292
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
293
+ Can be either:
294
+
295
+ - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
296
+ - A path to a *directory* containing model weights saved using
297
+ [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
298
+ - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
299
+ case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
300
+ argument. This loading path is slower than converting the PyTorch model to a TensorFlow model
301
+ using the provided conversion scripts and loading the TensorFlow model afterwards.
302
+ model_args (additional positional arguments, *optional*):
303
+ Will be passed along to the underlying model `__init__()` method.
304
+ config ([`PretrainedConfig`], *optional*):
305
+ Configuration for the model to use instead of an automatically loaded configuration. Configuration can
306
+ be automatically loaded when:
307
+
308
+ - The model is a model provided by the library (loaded with the *model id* string of a pretrained
309
+ model).
310
+ - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
311
+ save directory.
312
+ - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
313
+ configuration JSON file named *config.json* is found in the directory.
314
+ cache_dir (`str` or `os.PathLike`, *optional*):
315
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
316
+ standard cache should not be used.
317
+ from_pt (`bool`, *optional*, defaults to `False`):
318
+ Load the model weights from a PyTorch checkpoint save file (see docstring of
319
+ `pretrained_model_name_or_path` argument).
320
+ force_download (`bool`, *optional*, defaults to `False`):
321
+ Whether or not to force the (re-)download of the model weights and configuration files, overriding the
322
+ cached versions if they exist.
323
+ resume_download (`bool`, *optional*, defaults to `False`):
324
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
325
+ file exists.
326
+ proxies (`Dict[str, str]`, *optional*):
327
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
328
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
329
+ output_loading_info(`bool`, *optional*, defaults to `False`):
330
+ Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
331
+ local_files_only(`bool`, *optional*, defaults to `False`):
332
+ Whether or not to only look at local files (e.g., not try downloading the model).
333
+ revision (`str`, *optional*, defaults to `"main"`):
334
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
335
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
336
+ identifier allowed by git.
337
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
338
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
339
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
340
+ execute code present on the Hub on your local machine.
341
+ code_revision (`str`, *optional*, defaults to `"main"`):
342
+ The specific revision to use for the code on the Hub, if the code lives in a different repository than
343
+ the rest of the model. It can be a branch name, a tag name, or a commit id, since we use a git-based
344
+ system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier
345
+ allowed by git.
346
+ kwargs (additional keyword arguments, *optional*):
347
+ Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g.,
348
+ `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
349
+ automatically loaded:
350
+
351
+ - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
352
+ underlying model's `__init__` method (we assume all relevant updates to the configuration have
353
+ already been done)
354
+ - If a configuration is not provided, `kwargs` will be first passed to the configuration class
355
+ initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
356
+ corresponds to a configuration attribute will be used to override said attribute with the
357
+ supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
358
+ will be passed to the underlying model's `__init__` function.
359
+
360
+ Examples:
361
+
362
+ ```python
363
+ >>> from transformers import AutoConfig, BaseAutoModelClass
364
+
365
+ >>> # Download model and configuration from huggingface.co and cache.
366
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder")
367
+
368
+ >>> # Update configuration during loading
369
+ >>> model = BaseAutoModelClass.from_pretrained("checkpoint_placeholder", output_attentions=True)
370
+ >>> model.config.output_attentions
371
+ True
372
+
373
+ >>> # Loading from a PyTorch checkpoint file instead of a TensorFlow model (slower)
374
+ >>> config = AutoConfig.from_pretrained("./pt_model/shortcut_placeholder_pt_model_config.json")
375
+ >>> model = BaseAutoModelClass.from_pretrained(
376
+ ... "./pt_model/shortcut_placeholder_pytorch_model.bin", from_pt=True, config=config
377
+ ... )
378
+ ```
379
+ """
380
+
381
+
382
+ def _get_model_class(config, model_mapping):
383
+ supported_models = model_mapping[type(config)]
384
+ if not isinstance(supported_models, (list, tuple)):
385
+ return supported_models
386
+
387
+ name_to_model = {model.__name__: model for model in supported_models}
388
+ architectures = getattr(config, "architectures", [])
389
+ for arch in architectures:
390
+ if arch in name_to_model:
391
+ return name_to_model[arch]
392
+ elif f"TF{arch}" in name_to_model:
393
+ return name_to_model[f"TF{arch}"]
394
+ elif f"Flax{arch}" in name_to_model:
395
+ return name_to_model[f"Flax{arch}"]
396
+
397
+ # If no architecture is set in the config, or none of them matches the supported models, the first element of the tuple is the
398
+ # default.
399
+ return supported_models[0]
400
+
401
+
402
+ class _BaseAutoModelClass:
403
+ # Base class for auto models.
404
+ _model_mapping = None
405
+
406
+ def __init__(self, *args, **kwargs):
407
+ raise EnvironmentError(
408
+ f"{self.__class__.__name__} is designed to be instantiated "
409
+ f"using the `{self.__class__.__name__}.from_pretrained(pretrained_model_name_or_path)` or "
410
+ f"`{self.__class__.__name__}.from_config(config)` methods."
411
+ )
412
+
413
+ @classmethod
414
+ def from_config(cls, config, **kwargs):
415
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
416
+ has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
417
+ has_local_code = type(config) in cls._model_mapping.keys()
418
+ trust_remote_code = resolve_trust_remote_code(
419
+ trust_remote_code, config._name_or_path, has_local_code, has_remote_code
420
+ )
421
+
422
+ if has_remote_code and trust_remote_code:
423
+ class_ref = config.auto_map[cls.__name__]
424
+ if "--" in class_ref:
425
+ repo_id, class_ref = class_ref.split("--")
426
+ else:
427
+ repo_id = config.name_or_path
428
+ model_class = get_class_from_dynamic_module(class_ref, repo_id, **kwargs)
429
+ if os.path.isdir(config._name_or_path):
430
+ model_class.register_for_auto_class(cls.__name__)
431
+ else:
432
+ cls.register(config.__class__, model_class, exist_ok=True)
433
+ _ = kwargs.pop("code_revision", None)
434
+ return model_class._from_config(config, **kwargs)
435
+ elif type(config) in cls._model_mapping.keys():
436
+ model_class = _get_model_class(config, cls._model_mapping)
437
+ return model_class._from_config(config, **kwargs)
438
+
439
+ raise ValueError(
440
+ f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
441
+ f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
442
+ )
443
+
444
+ @classmethod
445
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
446
+ config = kwargs.pop("config", None)
447
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
448
+ kwargs["_from_auto"] = True
449
+ hub_kwargs_names = [
450
+ "cache_dir",
451
+ "force_download",
452
+ "local_files_only",
453
+ "proxies",
454
+ "resume_download",
455
+ "revision",
456
+ "subfolder",
457
+ "use_auth_token",
458
+ "token",
459
+ ]
460
+ hub_kwargs = {name: kwargs.pop(name) for name in hub_kwargs_names if name in kwargs}
461
+ code_revision = kwargs.pop("code_revision", None)
462
+ commit_hash = kwargs.pop("_commit_hash", None)
463
+ adapter_kwargs = kwargs.pop("adapter_kwargs", None)
464
+
465
+ token = hub_kwargs.pop("token", None)
466
+ use_auth_token = hub_kwargs.pop("use_auth_token", None)
467
+ if use_auth_token is not None:
468
+ warnings.warn(
469
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
470
+ FutureWarning,
471
+ )
472
+ if token is not None:
473
+ raise ValueError(
474
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
475
+ )
476
+ token = use_auth_token
477
+
478
+ if token is not None:
479
+ hub_kwargs["token"] = token
480
+
481
+ if commit_hash is None:
482
+ if not isinstance(config, PretrainedConfig):
483
+ # We make a call to the config file first (which may be absent) to get the commit hash as soon as possible
484
+ resolved_config_file = cached_file(
485
+ pretrained_model_name_or_path,
486
+ CONFIG_NAME,
487
+ _raise_exceptions_for_gated_repo=False,
488
+ _raise_exceptions_for_missing_entries=False,
489
+ _raise_exceptions_for_connection_errors=False,
490
+ **hub_kwargs,
491
+ )
492
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
493
+ else:
494
+ commit_hash = getattr(config, "_commit_hash", None)
495
+
496
+ if is_peft_available():
497
+ if adapter_kwargs is None:
498
+ adapter_kwargs = {}
499
+ if token is not None:
500
+ adapter_kwargs["token"] = token
501
+
502
+ maybe_adapter_path = find_adapter_config_file(
503
+ pretrained_model_name_or_path, _commit_hash=commit_hash, **adapter_kwargs
504
+ )
505
+
506
+ if maybe_adapter_path is not None:
507
+ with open(maybe_adapter_path, "r", encoding="utf-8") as f:
508
+ adapter_config = json.load(f)
509
+
510
+ adapter_kwargs["_adapter_model_path"] = pretrained_model_name_or_path
511
+ pretrained_model_name_or_path = adapter_config["base_model_name_or_path"]
512
+
513
+ if not isinstance(config, PretrainedConfig):
514
+ kwargs_orig = copy.deepcopy(kwargs)
515
+ # ensure not to pollute the config object with torch_dtype="auto" - since it's
516
+ # meaningless in the context of the config object - torch.dtype values are acceptable
517
+ if kwargs.get("torch_dtype", None) == "auto":
518
+ _ = kwargs.pop("torch_dtype")
519
+ # to not overwrite the quantization_config if config has a quantization_config
520
+ if kwargs.get("quantization_config", None) is not None:
521
+ _ = kwargs.pop("quantization_config")
522
+
523
+ config, kwargs = AutoConfig.from_pretrained(
524
+ pretrained_model_name_or_path,
525
+ return_unused_kwargs=True,
526
+ trust_remote_code=trust_remote_code,
527
+ code_revision=code_revision,
528
+ _commit_hash=commit_hash,
529
+ **hub_kwargs,
530
+ **kwargs,
531
+ )
532
+
533
+ # if torch_dtype=auto was passed here, ensure to pass it on
534
+ if kwargs_orig.get("torch_dtype", None) == "auto":
535
+ kwargs["torch_dtype"] = "auto"
536
+ if kwargs_orig.get("quantization_config", None) is not None:
537
+ kwargs["quantization_config"] = kwargs_orig["quantization_config"]
538
+
539
+ has_remote_code = hasattr(config, "auto_map") and cls.__name__ in config.auto_map
540
+ has_local_code = type(config) in cls._model_mapping.keys()
541
+ trust_remote_code = resolve_trust_remote_code(
542
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
543
+ )
544
+
545
+ # Set the adapter kwargs
546
+ kwargs["adapter_kwargs"] = adapter_kwargs
547
+
548
+ if has_remote_code and trust_remote_code:
549
+ class_ref = config.auto_map[cls.__name__]
550
+ model_class = get_class_from_dynamic_module(
551
+ class_ref, pretrained_model_name_or_path, code_revision=code_revision, **hub_kwargs, **kwargs
552
+ )
553
+ _ = hub_kwargs.pop("code_revision", None)
554
+ if os.path.isdir(pretrained_model_name_or_path):
555
+ model_class.register_for_auto_class(cls.__name__)
556
+ else:
557
+ cls.register(config.__class__, model_class, exist_ok=True)
558
+ return model_class.from_pretrained(
559
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
560
+ )
561
+ elif type(config) in cls._model_mapping.keys():
562
+ model_class = _get_model_class(config, cls._model_mapping)
563
+ return model_class.from_pretrained(
564
+ pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
565
+ )
566
+ raise ValueError(
567
+ f"Unrecognized configuration class {config.__class__} for this kind of AutoModel: {cls.__name__}.\n"
568
+ f"Model type should be one of {', '.join(c.__name__ for c in cls._model_mapping.keys())}."
569
+ )
570
+
571
+ @classmethod
572
+ def register(cls, config_class, model_class, exist_ok=False):
573
+ """
574
+ Register a new model for this class.
575
+
576
+ Args:
577
+ config_class ([`PretrainedConfig`]):
578
+ The configuration corresponding to the model to register.
579
+ model_class ([`PreTrainedModel`]):
580
+ The model to register.
581
+ """
582
+ if hasattr(model_class, "config_class") and str(model_class.config_class) != str(config_class):
583
+ raise ValueError(
584
+ "The model class you are passing has a `config_class` attribute that is not consistent with the "
585
+ f"config class you passed (model has {model_class.config_class} and you passed {config_class}. Fix "
586
+ "one of those so they match!"
587
+ )
588
+ cls._model_mapping.register(config_class, model_class, exist_ok=exist_ok)
589
+
590
+
591
+ class _BaseAutoBackboneClass(_BaseAutoModelClass):
592
+ # Base class for auto backbone models.
593
+ _model_mapping = None
594
+
595
+ @classmethod
596
+ def _load_timm_backbone_from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
597
+ requires_backends(cls, ["vision", "timm"])
598
+ from ...models.timm_backbone import TimmBackboneConfig
599
+
600
+ config = kwargs.pop("config", TimmBackboneConfig())
601
+
602
+ if kwargs.get("out_features", None) is not None:
603
+ raise ValueError("Cannot specify `out_features` for timm backbones")
604
+
605
+ if kwargs.get("output_loading_info", False):
606
+ raise ValueError("Cannot specify `output_loading_info=True` when loading from timm")
607
+
608
+ num_channels = kwargs.pop("num_channels", config.num_channels)
609
+ features_only = kwargs.pop("features_only", config.features_only)
610
+ use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
611
+ out_indices = kwargs.pop("out_indices", config.out_indices)
612
+ config = TimmBackboneConfig(
613
+ backbone=pretrained_model_name_or_path,
614
+ num_channels=num_channels,
615
+ features_only=features_only,
616
+ use_pretrained_backbone=use_pretrained_backbone,
617
+ out_indices=out_indices,
618
+ )
619
+ return super().from_config(config, **kwargs)
620
+
621
+ @classmethod
622
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
623
+ use_timm_backbone = kwargs.pop("use_timm_backbone", False)
624
+ if use_timm_backbone:
625
+ return cls._load_timm_backbone_from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
626
+
627
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
628
+
629
+
630
+ def insert_head_doc(docstring, head_doc=""):
631
+ if len(head_doc) > 0:
632
+ return docstring.replace(
633
+ "one of the model classes of the library ",
634
+ f"one of the model classes of the library (with a {head_doc} head) ",
635
+ )
636
+ return docstring.replace(
637
+ "one of the model classes of the library ", "one of the base model classes of the library "
638
+ )
639
+
640
+
641
+ def auto_class_update(cls, checkpoint_for_example="google-bert/bert-base-cased", head_doc=""):
642
+ # Create a new class with the right name from the base class
643
+ model_mapping = cls._model_mapping
644
+ name = cls.__name__
645
+ class_docstring = insert_head_doc(CLASS_DOCSTRING, head_doc=head_doc)
646
+ cls.__doc__ = class_docstring.replace("BaseAutoModelClass", name)
647
+
648
+ # Now we need to copy and re-register `from_config` and `from_pretrained` as class methods otherwise we can't
649
+ # have specific docstrings for them.
650
+ from_config = copy_func(_BaseAutoModelClass.from_config)
651
+ from_config_docstring = insert_head_doc(FROM_CONFIG_DOCSTRING, head_doc=head_doc)
652
+ from_config_docstring = from_config_docstring.replace("BaseAutoModelClass", name)
653
+ from_config_docstring = from_config_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
654
+ from_config.__doc__ = from_config_docstring
655
+ from_config = replace_list_option_in_docstrings(model_mapping._model_mapping, use_model_types=False)(from_config)
656
+ cls.from_config = classmethod(from_config)
657
+
658
+ if name.startswith("TF"):
659
+ from_pretrained_docstring = FROM_PRETRAINED_TF_DOCSTRING
660
+ elif name.startswith("Flax"):
661
+ from_pretrained_docstring = FROM_PRETRAINED_FLAX_DOCSTRING
662
+ else:
663
+ from_pretrained_docstring = FROM_PRETRAINED_TORCH_DOCSTRING
664
+ from_pretrained = copy_func(_BaseAutoModelClass.from_pretrained)
665
+ from_pretrained_docstring = insert_head_doc(from_pretrained_docstring, head_doc=head_doc)
666
+ from_pretrained_docstring = from_pretrained_docstring.replace("BaseAutoModelClass", name)
667
+ from_pretrained_docstring = from_pretrained_docstring.replace("checkpoint_placeholder", checkpoint_for_example)
668
+ shortcut = checkpoint_for_example.split("/")[-1].split("-")[0]
669
+ from_pretrained_docstring = from_pretrained_docstring.replace("shortcut_placeholder", shortcut)
670
+ from_pretrained.__doc__ = from_pretrained_docstring
671
+ from_pretrained = replace_list_option_in_docstrings(model_mapping._model_mapping)(from_pretrained)
672
+ cls.from_pretrained = classmethod(from_pretrained)
673
+ return cls
674
+
675
+
676
+ def get_values(model_mapping):
677
+ result = []
678
+ for model in model_mapping.values():
679
+ if isinstance(model, (list, tuple)):
680
+ result += list(model)
681
+ else:
682
+ result.append(model)
683
+
684
+ return result
685
+
686
+
687
+ def getattribute_from_module(module, attr):
688
+ if attr is None:
689
+ return None
690
+ if isinstance(attr, tuple):
691
+ return tuple(getattribute_from_module(module, a) for a in attr)
692
+ if hasattr(module, attr):
693
+ return getattr(module, attr)
694
+ # Some of the mappings have entries model_type -> object of another model type. In that case we try to grab the
695
+ # object at the top level.
696
+ transformers_module = importlib.import_module("transformers")
697
+
698
+ if module != transformers_module:
699
+ try:
700
+ return getattribute_from_module(transformers_module, attr)
701
+ except ValueError:
702
+ raise ValueError(f"Could not find {attr} neither in {module} nor in {transformers_module}!")
703
+ else:
704
+ raise ValueError(f"Could not find {attr} in {transformers_module}!")
705
+
706
+
707
+ class _LazyAutoMapping(OrderedDict):
708
+ """
709
+ A mapping from config classes to objects (models or tokenizers, for instance) that lazily loads keys and values when it is accessed.
710
+
711
+ Args:
712
+ - config_mapping: The map model type to config class
713
+ - model_mapping: The map model type to model (or tokenizer) class
714
+ """
715
+
716
+ def __init__(self, config_mapping, model_mapping):
717
+ self._config_mapping = config_mapping
718
+ self._reverse_config_mapping = {v: k for k, v in config_mapping.items()}
719
+ self._model_mapping = model_mapping
720
+ self._model_mapping._model_mapping = self
721
+ self._extra_content = {}
722
+ self._modules = {}
723
+
724
+ def __len__(self):
725
+ common_keys = set(self._config_mapping.keys()).intersection(self._model_mapping.keys())
726
+ return len(common_keys) + len(self._extra_content)
727
+
728
+ def __getitem__(self, key):
729
+ if key in self._extra_content:
730
+ return self._extra_content[key]
731
+ model_type = self._reverse_config_mapping[key.__name__]
732
+ if model_type in self._model_mapping:
733
+ model_name = self._model_mapping[model_type]
734
+ return self._load_attr_from_module(model_type, model_name)
735
+
736
+ # Maybe there were several model types associated with this config.
737
+ model_types = [k for k, v in self._config_mapping.items() if v == key.__name__]
738
+ for mtype in model_types:
739
+ if mtype in self._model_mapping:
740
+ model_name = self._model_mapping[mtype]
741
+ return self._load_attr_from_module(mtype, model_name)
742
+ raise KeyError(key)
743
+
744
+ def _load_attr_from_module(self, model_type, attr):
745
+ module_name = model_type_to_module_name(model_type)
746
+ if module_name not in self._modules:
747
+ self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
748
+ return getattribute_from_module(self._modules[module_name], attr)
749
+
750
+ def keys(self):
751
+ mapping_keys = [
752
+ self._load_attr_from_module(key, name)
753
+ for key, name in self._config_mapping.items()
754
+ if key in self._model_mapping.keys()
755
+ ]
756
+ return mapping_keys + list(self._extra_content.keys())
757
+
758
+ def get(self, key, default):
759
+ try:
760
+ return self.__getitem__(key)
761
+ except KeyError:
762
+ return default
763
+
764
+ def __bool__(self):
765
+ return bool(self.keys())
766
+
767
+ def values(self):
768
+ mapping_values = [
769
+ self._load_attr_from_module(key, name)
770
+ for key, name in self._model_mapping.items()
771
+ if key in self._config_mapping.keys()
772
+ ]
773
+ return mapping_values + list(self._extra_content.values())
774
+
775
+ def items(self):
776
+ mapping_items = [
777
+ (
778
+ self._load_attr_from_module(key, self._config_mapping[key]),
779
+ self._load_attr_from_module(key, self._model_mapping[key]),
780
+ )
781
+ for key in self._model_mapping.keys()
782
+ if key in self._config_mapping.keys()
783
+ ]
784
+ return mapping_items + list(self._extra_content.items())
785
+
786
+ def __iter__(self):
787
+ return iter(self.keys())
788
+
789
+ def __contains__(self, item):
790
+ if item in self._extra_content:
791
+ return True
792
+ if not hasattr(item, "__name__") or item.__name__ not in self._reverse_config_mapping:
793
+ return False
794
+ model_type = self._reverse_config_mapping[item.__name__]
795
+ return model_type in self._model_mapping
796
+
797
+ def register(self, key, value, exist_ok=False):
798
+ """
799
+ Register a new model in this mapping.
800
+ """
801
+ if hasattr(key, "__name__") and key.__name__ in self._reverse_config_mapping:
802
+ model_type = self._reverse_config_mapping[key.__name__]
803
+ if model_type in self._model_mapping.keys() and not exist_ok:
804
+ raise ValueError(f"'{key}' is already used by a Transformers model.")
805
+
806
+ self._extra_content[key] = value
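
The `register` hooks above (`_BaseAutoModelClass.register`, backed by `_LazyAutoMapping.register`) are what the public `AutoConfig.register` / `AutoModel.register` API ultimately calls into. Below is a minimal sketch of that flow, assuming a toy `ResnetConfig`/`ResnetModel` pair defined locally; both names are illustrative and not part of the library.

```python
import torch.nn as nn

from transformers import AutoConfig, AutoModel, PretrainedConfig, PreTrainedModel


class ResnetConfig(PretrainedConfig):
    # Hypothetical model type; it must not clash with an existing entry in CONFIG_MAPPING.
    model_type = "custom-resnet"

    def __init__(self, hidden_size=64, **kwargs):
        self.hidden_size = hidden_size
        super().__init__(**kwargs)


class ResnetModel(PreTrainedModel):
    # Must match the config class passed to AutoModel.register (checked in register() above).
    config_class = ResnetConfig

    def __init__(self, config):
        super().__init__(config)
        self.layer = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, x):
        return self.layer(x)


# AutoConfig.register maps the model type to the config class;
# AutoModel.register maps the config class to the model class via _LazyAutoMapping.register.
AutoConfig.register("custom-resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)

config = AutoConfig.for_model("custom-resnet", hidden_size=32)
model = AutoModel.from_config(config)
```

Once registered, the lookups resolve through the `_extra_content` dictionaries shown above rather than the lazy name tables, so no module import is triggered for the custom classes.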
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/configuration_auto.py ADDED
@@ -0,0 +1,984 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Config class."""
16
+ import importlib
17
+ import os
18
+ import re
19
+ import warnings
20
+ from collections import OrderedDict
21
+ from typing import List, Union
22
+
23
+ from ...configuration_utils import PretrainedConfig
24
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
25
+ from ...utils import CONFIG_NAME, logging
26
+
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import CONFIG_ARCHIVE_MAP_MAPPING_NAMES # noqa: F401, E402
32
+
33
+
34
+ CONFIG_MAPPING_NAMES = OrderedDict(
35
+ [
36
+ # Add configs here
37
+ ("albert", "AlbertConfig"),
38
+ ("align", "AlignConfig"),
39
+ ("altclip", "AltCLIPConfig"),
40
+ ("audio-spectrogram-transformer", "ASTConfig"),
41
+ ("autoformer", "AutoformerConfig"),
42
+ ("bark", "BarkConfig"),
43
+ ("bart", "BartConfig"),
44
+ ("beit", "BeitConfig"),
45
+ ("bert", "BertConfig"),
46
+ ("bert-generation", "BertGenerationConfig"),
47
+ ("big_bird", "BigBirdConfig"),
48
+ ("bigbird_pegasus", "BigBirdPegasusConfig"),
49
+ ("biogpt", "BioGptConfig"),
50
+ ("bit", "BitConfig"),
51
+ ("blenderbot", "BlenderbotConfig"),
52
+ ("blenderbot-small", "BlenderbotSmallConfig"),
53
+ ("blip", "BlipConfig"),
54
+ ("blip-2", "Blip2Config"),
55
+ ("bloom", "BloomConfig"),
56
+ ("bridgetower", "BridgeTowerConfig"),
57
+ ("bros", "BrosConfig"),
58
+ ("camembert", "CamembertConfig"),
59
+ ("canine", "CanineConfig"),
60
+ ("chinese_clip", "ChineseCLIPConfig"),
61
+ ("chinese_clip_vision_model", "ChineseCLIPVisionConfig"),
62
+ ("clap", "ClapConfig"),
63
+ ("clip", "CLIPConfig"),
64
+ ("clip_vision_model", "CLIPVisionConfig"),
65
+ ("clipseg", "CLIPSegConfig"),
66
+ ("clvp", "ClvpConfig"),
67
+ ("code_llama", "LlamaConfig"),
68
+ ("codegen", "CodeGenConfig"),
69
+ ("cohere", "CohereConfig"),
70
+ ("conditional_detr", "ConditionalDetrConfig"),
71
+ ("convbert", "ConvBertConfig"),
72
+ ("convnext", "ConvNextConfig"),
73
+ ("convnextv2", "ConvNextV2Config"),
74
+ ("cpmant", "CpmAntConfig"),
75
+ ("ctrl", "CTRLConfig"),
76
+ ("cvt", "CvtConfig"),
77
+ ("data2vec-audio", "Data2VecAudioConfig"),
78
+ ("data2vec-text", "Data2VecTextConfig"),
79
+ ("data2vec-vision", "Data2VecVisionConfig"),
80
+ ("dbrx", "DbrxConfig"),
81
+ ("deberta", "DebertaConfig"),
82
+ ("deberta-v2", "DebertaV2Config"),
83
+ ("decision_transformer", "DecisionTransformerConfig"),
84
+ ("deformable_detr", "DeformableDetrConfig"),
85
+ ("deit", "DeiTConfig"),
86
+ ("depth_anything", "DepthAnythingConfig"),
87
+ ("deta", "DetaConfig"),
88
+ ("detr", "DetrConfig"),
89
+ ("dinat", "DinatConfig"),
90
+ ("dinov2", "Dinov2Config"),
91
+ ("distilbert", "DistilBertConfig"),
92
+ ("donut-swin", "DonutSwinConfig"),
93
+ ("dpr", "DPRConfig"),
94
+ ("dpt", "DPTConfig"),
95
+ ("efficientformer", "EfficientFormerConfig"),
96
+ ("efficientnet", "EfficientNetConfig"),
97
+ ("electra", "ElectraConfig"),
98
+ ("encodec", "EncodecConfig"),
99
+ ("encoder-decoder", "EncoderDecoderConfig"),
100
+ ("ernie", "ErnieConfig"),
101
+ ("ernie_m", "ErnieMConfig"),
102
+ ("esm", "EsmConfig"),
103
+ ("falcon", "FalconConfig"),
104
+ ("fastspeech2_conformer", "FastSpeech2ConformerConfig"),
105
+ ("flaubert", "FlaubertConfig"),
106
+ ("flava", "FlavaConfig"),
107
+ ("fnet", "FNetConfig"),
108
+ ("focalnet", "FocalNetConfig"),
109
+ ("fsmt", "FSMTConfig"),
110
+ ("funnel", "FunnelConfig"),
111
+ ("fuyu", "FuyuConfig"),
112
+ ("gemma", "GemmaConfig"),
113
+ ("git", "GitConfig"),
114
+ ("glpn", "GLPNConfig"),
115
+ ("gpt-sw3", "GPT2Config"),
116
+ ("gpt2", "GPT2Config"),
117
+ ("gpt_bigcode", "GPTBigCodeConfig"),
118
+ ("gpt_neo", "GPTNeoConfig"),
119
+ ("gpt_neox", "GPTNeoXConfig"),
120
+ ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"),
121
+ ("gptj", "GPTJConfig"),
122
+ ("gptsan-japanese", "GPTSanJapaneseConfig"),
123
+ ("graphormer", "GraphormerConfig"),
124
+ ("grounding-dino", "GroundingDinoConfig"),
125
+ ("groupvit", "GroupViTConfig"),
126
+ ("hubert", "HubertConfig"),
127
+ ("ibert", "IBertConfig"),
128
+ ("idefics", "IdeficsConfig"),
129
+ ("idefics2", "Idefics2Config"),
130
+ ("imagegpt", "ImageGPTConfig"),
131
+ ("informer", "InformerConfig"),
132
+ ("instructblip", "InstructBlipConfig"),
133
+ ("jamba", "JambaConfig"),
134
+ ("jukebox", "JukeboxConfig"),
135
+ ("kosmos-2", "Kosmos2Config"),
136
+ ("layoutlm", "LayoutLMConfig"),
137
+ ("layoutlmv2", "LayoutLMv2Config"),
138
+ ("layoutlmv3", "LayoutLMv3Config"),
139
+ ("led", "LEDConfig"),
140
+ ("levit", "LevitConfig"),
141
+ ("lilt", "LiltConfig"),
142
+ ("llama", "LlamaConfig"),
143
+ ("llava", "LlavaConfig"),
144
+ ("llava_next", "LlavaNextConfig"),
145
+ ("longformer", "LongformerConfig"),
146
+ ("longt5", "LongT5Config"),
147
+ ("luke", "LukeConfig"),
148
+ ("lxmert", "LxmertConfig"),
149
+ ("m2m_100", "M2M100Config"),
150
+ ("mamba", "MambaConfig"),
151
+ ("marian", "MarianConfig"),
152
+ ("markuplm", "MarkupLMConfig"),
153
+ ("mask2former", "Mask2FormerConfig"),
154
+ ("maskformer", "MaskFormerConfig"),
155
+ ("maskformer-swin", "MaskFormerSwinConfig"),
156
+ ("mbart", "MBartConfig"),
157
+ ("mctct", "MCTCTConfig"),
158
+ ("mega", "MegaConfig"),
159
+ ("megatron-bert", "MegatronBertConfig"),
160
+ ("mgp-str", "MgpstrConfig"),
161
+ ("mistral", "MistralConfig"),
162
+ ("mixtral", "MixtralConfig"),
163
+ ("mobilebert", "MobileBertConfig"),
164
+ ("mobilenet_v1", "MobileNetV1Config"),
165
+ ("mobilenet_v2", "MobileNetV2Config"),
166
+ ("mobilevit", "MobileViTConfig"),
167
+ ("mobilevitv2", "MobileViTV2Config"),
168
+ ("mpnet", "MPNetConfig"),
169
+ ("mpt", "MptConfig"),
170
+ ("mra", "MraConfig"),
171
+ ("mt5", "MT5Config"),
172
+ ("musicgen", "MusicgenConfig"),
173
+ ("musicgen_melody", "MusicgenMelodyConfig"),
174
+ ("mvp", "MvpConfig"),
175
+ ("nat", "NatConfig"),
176
+ ("nezha", "NezhaConfig"),
177
+ ("nllb-moe", "NllbMoeConfig"),
178
+ ("nougat", "VisionEncoderDecoderConfig"),
179
+ ("nystromformer", "NystromformerConfig"),
180
+ ("olmo", "OlmoConfig"),
181
+ ("oneformer", "OneFormerConfig"),
182
+ ("open-llama", "OpenLlamaConfig"),
183
+ ("openai-gpt", "OpenAIGPTConfig"),
184
+ ("opt", "OPTConfig"),
185
+ ("owlv2", "Owlv2Config"),
186
+ ("owlvit", "OwlViTConfig"),
187
+ ("patchtsmixer", "PatchTSMixerConfig"),
188
+ ("patchtst", "PatchTSTConfig"),
189
+ ("pegasus", "PegasusConfig"),
190
+ ("pegasus_x", "PegasusXConfig"),
191
+ ("perceiver", "PerceiverConfig"),
192
+ ("persimmon", "PersimmonConfig"),
193
+ ("phi", "PhiConfig"),
194
+ ("pix2struct", "Pix2StructConfig"),
195
+ ("plbart", "PLBartConfig"),
196
+ ("poolformer", "PoolFormerConfig"),
197
+ ("pop2piano", "Pop2PianoConfig"),
198
+ ("prophetnet", "ProphetNetConfig"),
199
+ ("pvt", "PvtConfig"),
200
+ ("pvt_v2", "PvtV2Config"),
201
+ ("qdqbert", "QDQBertConfig"),
202
+ ("qwen2", "Qwen2Config"),
203
+ ("qwen2_moe", "Qwen2MoeConfig"),
204
+ ("rag", "RagConfig"),
205
+ ("realm", "RealmConfig"),
206
+ ("recurrent_gemma", "RecurrentGemmaConfig"),
207
+ ("reformer", "ReformerConfig"),
208
+ ("regnet", "RegNetConfig"),
209
+ ("rembert", "RemBertConfig"),
210
+ ("resnet", "ResNetConfig"),
211
+ ("retribert", "RetriBertConfig"),
212
+ ("roberta", "RobertaConfig"),
213
+ ("roberta-prelayernorm", "RobertaPreLayerNormConfig"),
214
+ ("roc_bert", "RoCBertConfig"),
215
+ ("roformer", "RoFormerConfig"),
216
+ ("rwkv", "RwkvConfig"),
217
+ ("sam", "SamConfig"),
218
+ ("seamless_m4t", "SeamlessM4TConfig"),
219
+ ("seamless_m4t_v2", "SeamlessM4Tv2Config"),
220
+ ("segformer", "SegformerConfig"),
221
+ ("seggpt", "SegGptConfig"),
222
+ ("sew", "SEWConfig"),
223
+ ("sew-d", "SEWDConfig"),
224
+ ("siglip", "SiglipConfig"),
225
+ ("siglip_vision_model", "SiglipVisionConfig"),
226
+ ("speech-encoder-decoder", "SpeechEncoderDecoderConfig"),
227
+ ("speech_to_text", "Speech2TextConfig"),
228
+ ("speech_to_text_2", "Speech2Text2Config"),
229
+ ("speecht5", "SpeechT5Config"),
230
+ ("splinter", "SplinterConfig"),
231
+ ("squeezebert", "SqueezeBertConfig"),
232
+ ("stablelm", "StableLmConfig"),
233
+ ("starcoder2", "Starcoder2Config"),
234
+ ("superpoint", "SuperPointConfig"),
235
+ ("swiftformer", "SwiftFormerConfig"),
236
+ ("swin", "SwinConfig"),
237
+ ("swin2sr", "Swin2SRConfig"),
238
+ ("swinv2", "Swinv2Config"),
239
+ ("switch_transformers", "SwitchTransformersConfig"),
240
+ ("t5", "T5Config"),
241
+ ("table-transformer", "TableTransformerConfig"),
242
+ ("tapas", "TapasConfig"),
243
+ ("time_series_transformer", "TimeSeriesTransformerConfig"),
244
+ ("timesformer", "TimesformerConfig"),
245
+ ("timm_backbone", "TimmBackboneConfig"),
246
+ ("trajectory_transformer", "TrajectoryTransformerConfig"),
247
+ ("transfo-xl", "TransfoXLConfig"),
248
+ ("trocr", "TrOCRConfig"),
249
+ ("tvlt", "TvltConfig"),
250
+ ("tvp", "TvpConfig"),
251
+ ("udop", "UdopConfig"),
252
+ ("umt5", "UMT5Config"),
253
+ ("unispeech", "UniSpeechConfig"),
254
+ ("unispeech-sat", "UniSpeechSatConfig"),
255
+ ("univnet", "UnivNetConfig"),
256
+ ("upernet", "UperNetConfig"),
257
+ ("van", "VanConfig"),
258
+ ("videomae", "VideoMAEConfig"),
259
+ ("vilt", "ViltConfig"),
260
+ ("vipllava", "VipLlavaConfig"),
261
+ ("vision-encoder-decoder", "VisionEncoderDecoderConfig"),
262
+ ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"),
263
+ ("visual_bert", "VisualBertConfig"),
264
+ ("vit", "ViTConfig"),
265
+ ("vit_hybrid", "ViTHybridConfig"),
266
+ ("vit_mae", "ViTMAEConfig"),
267
+ ("vit_msn", "ViTMSNConfig"),
268
+ ("vitdet", "VitDetConfig"),
269
+ ("vitmatte", "VitMatteConfig"),
270
+ ("vits", "VitsConfig"),
271
+ ("vivit", "VivitConfig"),
272
+ ("wav2vec2", "Wav2Vec2Config"),
273
+ ("wav2vec2-bert", "Wav2Vec2BertConfig"),
274
+ ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"),
275
+ ("wavlm", "WavLMConfig"),
276
+ ("whisper", "WhisperConfig"),
277
+ ("xclip", "XCLIPConfig"),
278
+ ("xglm", "XGLMConfig"),
279
+ ("xlm", "XLMConfig"),
280
+ ("xlm-prophetnet", "XLMProphetNetConfig"),
281
+ ("xlm-roberta", "XLMRobertaConfig"),
282
+ ("xlm-roberta-xl", "XLMRobertaXLConfig"),
283
+ ("xlnet", "XLNetConfig"),
284
+ ("xmod", "XmodConfig"),
285
+ ("yolos", "YolosConfig"),
286
+ ("yoso", "YosoConfig"),
287
+ ]
288
+ )
289
+
290
+
291
+ MODEL_NAMES_MAPPING = OrderedDict(
292
+ [
293
+ # Add full (and cased) model names here
294
+ ("albert", "ALBERT"),
295
+ ("align", "ALIGN"),
296
+ ("altclip", "AltCLIP"),
297
+ ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"),
298
+ ("autoformer", "Autoformer"),
299
+ ("bark", "Bark"),
300
+ ("bart", "BART"),
301
+ ("barthez", "BARThez"),
302
+ ("bartpho", "BARTpho"),
303
+ ("beit", "BEiT"),
304
+ ("bert", "BERT"),
305
+ ("bert-generation", "Bert Generation"),
306
+ ("bert-japanese", "BertJapanese"),
307
+ ("bertweet", "BERTweet"),
308
+ ("big_bird", "BigBird"),
309
+ ("bigbird_pegasus", "BigBird-Pegasus"),
310
+ ("biogpt", "BioGpt"),
311
+ ("bit", "BiT"),
312
+ ("blenderbot", "Blenderbot"),
313
+ ("blenderbot-small", "BlenderbotSmall"),
314
+ ("blip", "BLIP"),
315
+ ("blip-2", "BLIP-2"),
316
+ ("bloom", "BLOOM"),
317
+ ("bort", "BORT"),
318
+ ("bridgetower", "BridgeTower"),
319
+ ("bros", "BROS"),
320
+ ("byt5", "ByT5"),
321
+ ("camembert", "CamemBERT"),
322
+ ("canine", "CANINE"),
323
+ ("chinese_clip", "Chinese-CLIP"),
324
+ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
325
+ ("clap", "CLAP"),
326
+ ("clip", "CLIP"),
327
+ ("clip_vision_model", "CLIPVisionModel"),
328
+ ("clipseg", "CLIPSeg"),
329
+ ("clvp", "CLVP"),
330
+ ("code_llama", "CodeLlama"),
331
+ ("codegen", "CodeGen"),
332
+ ("cohere", "Cohere"),
333
+ ("conditional_detr", "Conditional DETR"),
334
+ ("convbert", "ConvBERT"),
335
+ ("convnext", "ConvNeXT"),
336
+ ("convnextv2", "ConvNeXTV2"),
337
+ ("cpm", "CPM"),
338
+ ("cpmant", "CPM-Ant"),
339
+ ("ctrl", "CTRL"),
340
+ ("cvt", "CvT"),
341
+ ("data2vec-audio", "Data2VecAudio"),
342
+ ("data2vec-text", "Data2VecText"),
343
+ ("data2vec-vision", "Data2VecVision"),
344
+ ("dbrx", "DBRX"),
345
+ ("deberta", "DeBERTa"),
346
+ ("deberta-v2", "DeBERTa-v2"),
347
+ ("decision_transformer", "Decision Transformer"),
348
+ ("deformable_detr", "Deformable DETR"),
349
+ ("deit", "DeiT"),
350
+ ("deplot", "DePlot"),
351
+ ("depth_anything", "Depth Anything"),
352
+ ("deta", "DETA"),
353
+ ("detr", "DETR"),
354
+ ("dialogpt", "DialoGPT"),
355
+ ("dinat", "DiNAT"),
356
+ ("dinov2", "DINOv2"),
357
+ ("distilbert", "DistilBERT"),
358
+ ("dit", "DiT"),
359
+ ("donut-swin", "DonutSwin"),
360
+ ("dpr", "DPR"),
361
+ ("dpt", "DPT"),
362
+ ("efficientformer", "EfficientFormer"),
363
+ ("efficientnet", "EfficientNet"),
364
+ ("electra", "ELECTRA"),
365
+ ("encodec", "EnCodec"),
366
+ ("encoder-decoder", "Encoder decoder"),
367
+ ("ernie", "ERNIE"),
368
+ ("ernie_m", "ErnieM"),
369
+ ("esm", "ESM"),
370
+ ("falcon", "Falcon"),
371
+ ("fastspeech2_conformer", "FastSpeech2Conformer"),
372
+ ("flan-t5", "FLAN-T5"),
373
+ ("flan-ul2", "FLAN-UL2"),
374
+ ("flaubert", "FlauBERT"),
375
+ ("flava", "FLAVA"),
376
+ ("fnet", "FNet"),
377
+ ("focalnet", "FocalNet"),
378
+ ("fsmt", "FairSeq Machine-Translation"),
379
+ ("funnel", "Funnel Transformer"),
380
+ ("fuyu", "Fuyu"),
381
+ ("gemma", "Gemma"),
382
+ ("git", "GIT"),
383
+ ("glpn", "GLPN"),
384
+ ("gpt-sw3", "GPT-Sw3"),
385
+ ("gpt2", "OpenAI GPT-2"),
386
+ ("gpt_bigcode", "GPTBigCode"),
387
+ ("gpt_neo", "GPT Neo"),
388
+ ("gpt_neox", "GPT NeoX"),
389
+ ("gpt_neox_japanese", "GPT NeoX Japanese"),
390
+ ("gptj", "GPT-J"),
391
+ ("gptsan-japanese", "GPTSAN-japanese"),
392
+ ("graphormer", "Graphormer"),
393
+ ("grounding-dino", "Grounding DINO"),
394
+ ("groupvit", "GroupViT"),
395
+ ("herbert", "HerBERT"),
396
+ ("hubert", "Hubert"),
397
+ ("ibert", "I-BERT"),
398
+ ("idefics", "IDEFICS"),
399
+ ("idefics2", "Idefics2"),
400
+ ("imagegpt", "ImageGPT"),
401
+ ("informer", "Informer"),
402
+ ("instructblip", "InstructBLIP"),
403
+ ("jamba", "Jamba"),
404
+ ("jukebox", "Jukebox"),
405
+ ("kosmos-2", "KOSMOS-2"),
406
+ ("layoutlm", "LayoutLM"),
407
+ ("layoutlmv2", "LayoutLMv2"),
408
+ ("layoutlmv3", "LayoutLMv3"),
409
+ ("layoutxlm", "LayoutXLM"),
410
+ ("led", "LED"),
411
+ ("levit", "LeViT"),
412
+ ("lilt", "LiLT"),
413
+ ("llama", "LLaMA"),
414
+ ("llama2", "Llama2"),
415
+ ("llava", "LLaVa"),
416
+ ("llava_next", "LLaVA-NeXT"),
417
+ ("longformer", "Longformer"),
418
+ ("longt5", "LongT5"),
419
+ ("luke", "LUKE"),
420
+ ("lxmert", "LXMERT"),
421
+ ("m2m_100", "M2M100"),
422
+ ("madlad-400", "MADLAD-400"),
423
+ ("mamba", "Mamba"),
424
+ ("marian", "Marian"),
425
+ ("markuplm", "MarkupLM"),
426
+ ("mask2former", "Mask2Former"),
427
+ ("maskformer", "MaskFormer"),
428
+ ("maskformer-swin", "MaskFormerSwin"),
429
+ ("matcha", "MatCha"),
430
+ ("mbart", "mBART"),
431
+ ("mbart50", "mBART-50"),
432
+ ("mctct", "M-CTC-T"),
433
+ ("mega", "MEGA"),
434
+ ("megatron-bert", "Megatron-BERT"),
435
+ ("megatron_gpt2", "Megatron-GPT2"),
436
+ ("mgp-str", "MGP-STR"),
437
+ ("mistral", "Mistral"),
438
+ ("mixtral", "Mixtral"),
439
+ ("mluke", "mLUKE"),
440
+ ("mms", "MMS"),
441
+ ("mobilebert", "MobileBERT"),
442
+ ("mobilenet_v1", "MobileNetV1"),
443
+ ("mobilenet_v2", "MobileNetV2"),
444
+ ("mobilevit", "MobileViT"),
445
+ ("mobilevitv2", "MobileViTV2"),
446
+ ("mpnet", "MPNet"),
447
+ ("mpt", "MPT"),
448
+ ("mra", "MRA"),
449
+ ("mt5", "MT5"),
450
+ ("musicgen", "MusicGen"),
451
+ ("musicgen_melody", "MusicGen Melody"),
452
+ ("mvp", "MVP"),
453
+ ("nat", "NAT"),
454
+ ("nezha", "Nezha"),
455
+ ("nllb", "NLLB"),
456
+ ("nllb-moe", "NLLB-MOE"),
457
+ ("nougat", "Nougat"),
458
+ ("nystromformer", "Nyströmformer"),
459
+ ("olmo", "OLMo"),
460
+ ("oneformer", "OneFormer"),
461
+ ("open-llama", "OpenLlama"),
462
+ ("openai-gpt", "OpenAI GPT"),
463
+ ("opt", "OPT"),
464
+ ("owlv2", "OWLv2"),
465
+ ("owlvit", "OWL-ViT"),
466
+ ("patchtsmixer", "PatchTSMixer"),
467
+ ("patchtst", "PatchTST"),
468
+ ("pegasus", "Pegasus"),
469
+ ("pegasus_x", "PEGASUS-X"),
470
+ ("perceiver", "Perceiver"),
471
+ ("persimmon", "Persimmon"),
472
+ ("phi", "Phi"),
473
+ ("phobert", "PhoBERT"),
474
+ ("pix2struct", "Pix2Struct"),
475
+ ("plbart", "PLBart"),
476
+ ("poolformer", "PoolFormer"),
477
+ ("pop2piano", "Pop2Piano"),
478
+ ("prophetnet", "ProphetNet"),
479
+ ("pvt", "PVT"),
480
+ ("pvt_v2", "PVTv2"),
481
+ ("qdqbert", "QDQBert"),
482
+ ("qwen2", "Qwen2"),
483
+ ("qwen2_moe", "Qwen2MoE"),
484
+ ("rag", "RAG"),
485
+ ("realm", "REALM"),
486
+ ("recurrent_gemma", "RecurrentGemma"),
487
+ ("reformer", "Reformer"),
488
+ ("regnet", "RegNet"),
489
+ ("rembert", "RemBERT"),
490
+ ("resnet", "ResNet"),
491
+ ("retribert", "RetriBERT"),
492
+ ("roberta", "RoBERTa"),
493
+ ("roberta-prelayernorm", "RoBERTa-PreLayerNorm"),
494
+ ("roc_bert", "RoCBert"),
495
+ ("roformer", "RoFormer"),
496
+ ("rwkv", "RWKV"),
497
+ ("sam", "SAM"),
498
+ ("seamless_m4t", "SeamlessM4T"),
499
+ ("seamless_m4t_v2", "SeamlessM4Tv2"),
500
+ ("segformer", "SegFormer"),
501
+ ("seggpt", "SegGPT"),
502
+ ("sew", "SEW"),
503
+ ("sew-d", "SEW-D"),
504
+ ("siglip", "SigLIP"),
505
+ ("siglip_vision_model", "SiglipVisionModel"),
506
+ ("speech-encoder-decoder", "Speech Encoder decoder"),
507
+ ("speech_to_text", "Speech2Text"),
508
+ ("speech_to_text_2", "Speech2Text2"),
509
+ ("speecht5", "SpeechT5"),
510
+ ("splinter", "Splinter"),
511
+ ("squeezebert", "SqueezeBERT"),
512
+ ("stablelm", "StableLm"),
513
+ ("starcoder2", "Starcoder2"),
514
+ ("superpoint", "SuperPoint"),
515
+ ("swiftformer", "SwiftFormer"),
516
+ ("swin", "Swin Transformer"),
517
+ ("swin2sr", "Swin2SR"),
518
+ ("swinv2", "Swin Transformer V2"),
519
+ ("switch_transformers", "SwitchTransformers"),
520
+ ("t5", "T5"),
521
+ ("t5v1.1", "T5v1.1"),
522
+ ("table-transformer", "Table Transformer"),
523
+ ("tapas", "TAPAS"),
524
+ ("tapex", "TAPEX"),
525
+ ("time_series_transformer", "Time Series Transformer"),
526
+ ("timesformer", "TimeSformer"),
527
+ ("timm_backbone", "TimmBackbone"),
528
+ ("trajectory_transformer", "Trajectory Transformer"),
529
+ ("transfo-xl", "Transformer-XL"),
530
+ ("trocr", "TrOCR"),
531
+ ("tvlt", "TVLT"),
532
+ ("tvp", "TVP"),
533
+ ("udop", "UDOP"),
534
+ ("ul2", "UL2"),
535
+ ("umt5", "UMT5"),
536
+ ("unispeech", "UniSpeech"),
537
+ ("unispeech-sat", "UniSpeechSat"),
538
+ ("univnet", "UnivNet"),
539
+ ("upernet", "UPerNet"),
540
+ ("van", "VAN"),
541
+ ("videomae", "VideoMAE"),
542
+ ("vilt", "ViLT"),
543
+ ("vipllava", "VipLlava"),
544
+ ("vision-encoder-decoder", "Vision Encoder decoder"),
545
+ ("vision-text-dual-encoder", "VisionTextDualEncoder"),
546
+ ("visual_bert", "VisualBERT"),
547
+ ("vit", "ViT"),
548
+ ("vit_hybrid", "ViT Hybrid"),
549
+ ("vit_mae", "ViTMAE"),
550
+ ("vit_msn", "ViTMSN"),
551
+ ("vitdet", "VitDet"),
552
+ ("vitmatte", "ViTMatte"),
553
+ ("vits", "VITS"),
554
+ ("vivit", "ViViT"),
555
+ ("wav2vec2", "Wav2Vec2"),
556
+ ("wav2vec2-bert", "Wav2Vec2-BERT"),
557
+ ("wav2vec2-conformer", "Wav2Vec2-Conformer"),
558
+ ("wav2vec2_phoneme", "Wav2Vec2Phoneme"),
559
+ ("wavlm", "WavLM"),
560
+ ("whisper", "Whisper"),
561
+ ("xclip", "X-CLIP"),
562
+ ("xglm", "XGLM"),
563
+ ("xlm", "XLM"),
564
+ ("xlm-prophetnet", "XLM-ProphetNet"),
565
+ ("xlm-roberta", "XLM-RoBERTa"),
566
+ ("xlm-roberta-xl", "XLM-RoBERTa-XL"),
567
+ ("xlm-v", "XLM-V"),
568
+ ("xlnet", "XLNet"),
569
+ ("xls_r", "XLS-R"),
570
+ ("xlsr_wav2vec2", "XLSR-Wav2Vec2"),
571
+ ("xmod", "X-MOD"),
572
+ ("yolos", "YOLOS"),
573
+ ("yoso", "YOSO"),
574
+ ]
575
+ )
576
+
577
+ # This is tied to the processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting
578
+ # `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`.
579
+ DEPRECATED_MODELS = [
580
+ "bort",
581
+ "mctct",
582
+ "mmbt",
583
+ "open_llama",
584
+ "retribert",
585
+ "tapex",
586
+ "trajectory_transformer",
587
+ "transfo_xl",
588
+ "van",
589
+ ]
590
+
591
+ SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict(
592
+ [
593
+ ("openai-gpt", "openai"),
594
+ ("data2vec-audio", "data2vec"),
595
+ ("data2vec-text", "data2vec"),
596
+ ("data2vec-vision", "data2vec"),
597
+ ("donut-swin", "donut"),
598
+ ("kosmos-2", "kosmos2"),
599
+ ("maskformer-swin", "maskformer"),
600
+ ("xclip", "x_clip"),
601
+ ("clip_vision_model", "clip"),
602
+ ("siglip_vision_model", "siglip"),
603
+ ("chinese_clip_vision_model", "chinese_clip"),
604
+ ]
605
+ )
606
+
607
+
608
+ def model_type_to_module_name(key):
609
+ """Converts a config key to the corresponding module."""
610
+ # Special treatment
611
+ if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME:
612
+ return SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key]
613
+
614
+ key = key.replace("-", "_")
615
+ if key in DEPRECATED_MODELS:
616
+ key = f"deprecated.{key}"
617
+
618
+ return key
619
+
620
+
621
+ def config_class_to_model_type(config):
622
+ """Converts a config class name to the corresponding model type"""
623
+ for key, cls in CONFIG_MAPPING_NAMES.items():
624
+ if cls == config:
625
+ return key
626
+ # if key not found check in extra content
627
+ for key, cls in CONFIG_MAPPING._extra_content.items():
628
+ if cls.__name__ == config:
629
+ return key
630
+ return None
631
+
632
+
633
+ class _LazyConfigMapping(OrderedDict):
634
+ """
635
+ A dictionary that lazily load its values when they are requested.
636
+ """
637
+
638
+ def __init__(self, mapping):
639
+ self._mapping = mapping
640
+ self._extra_content = {}
641
+ self._modules = {}
642
+
643
+ def __getitem__(self, key):
644
+ if key in self._extra_content:
645
+ return self._extra_content[key]
646
+ if key not in self._mapping:
647
+ raise KeyError(key)
648
+ value = self._mapping[key]
649
+ module_name = model_type_to_module_name(key)
650
+ if module_name not in self._modules:
651
+ self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models")
652
+ if hasattr(self._modules[module_name], value):
653
+ return getattr(self._modules[module_name], value)
654
+
655
+ # Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the
656
+ # object at the top level.
657
+ transformers_module = importlib.import_module("transformers")
658
+ return getattr(transformers_module, value)
659
+
660
+ def keys(self):
661
+ return list(self._mapping.keys()) + list(self._extra_content.keys())
662
+
663
+ def values(self):
664
+ return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values())
665
+
666
+ def items(self):
667
+ return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items())
668
+
669
+ def __iter__(self):
670
+ return iter(list(self._mapping.keys()) + list(self._extra_content.keys()))
671
+
672
+ def __contains__(self, item):
673
+ return item in self._mapping or item in self._extra_content
674
+
675
+ def register(self, key, value, exist_ok=False):
676
+ """
677
+ Register a new configuration in this mapping.
678
+ """
679
+ if key in self._mapping.keys() and not exist_ok:
680
+ raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.")
681
+ self._extra_content[key] = value
682
+
683
+
684
+ CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES)
685
+
686
+
687
+ class _LazyLoadAllMappings(OrderedDict):
688
+ """
689
+ A mapping that will load all key/value pairs on first access (either by indexing, requesting keys, values,
690
+ etc.)
691
+
692
+ Args:
693
+ mapping: The mapping to load.
694
+ """
695
+
696
+ def __init__(self, mapping):
697
+ self._mapping = mapping
698
+ self._initialized = False
699
+ self._data = {}
700
+
701
+ def _initialize(self):
702
+ if self._initialized:
703
+ return
704
+
705
+ for model_type, map_name in self._mapping.items():
706
+ module_name = model_type_to_module_name(model_type)
707
+ module = importlib.import_module(f".{module_name}", "transformers.models")
708
+ mapping = getattr(module, map_name)
709
+ self._data.update(mapping)
710
+
711
+ self._initialized = True
712
+
713
+ def __getitem__(self, key):
714
+ self._initialize()
715
+ return self._data[key]
716
+
717
+ def keys(self):
718
+ self._initialize()
719
+ return self._data.keys()
720
+
721
+ def values(self):
722
+ self._initialize()
723
+ return self._data.values()
724
+
725
+ def items(self):
726
+ self._initialize()
727
+ return self._data.items()
728
+
729
+ def __iter__(self):
730
+ self._initialize()
731
+ return iter(self._data)
732
+
733
+ def __contains__(self, item):
734
+ self._initialize()
735
+ return item in self._data
736
+
737
+
738
+ def _get_class_name(model_class: Union[str, List[str]]):
739
+ if isinstance(model_class, (list, tuple)):
740
+ return " or ".join([f"[`{c}`]" for c in model_class if c is not None])
741
+ return f"[`{model_class}`]"
742
+
743
+
744
+ def _list_model_options(indent, config_to_class=None, use_model_types=True):
745
+ if config_to_class is None and not use_model_types:
746
+ raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.")
747
+ if use_model_types:
748
+ if config_to_class is None:
749
+ model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()}
750
+ else:
751
+ model_type_to_name = {
752
+ model_type: _get_class_name(model_class)
753
+ for model_type, model_class in config_to_class.items()
754
+ if model_type in MODEL_NAMES_MAPPING
755
+ }
756
+ lines = [
757
+ f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)"
758
+ for model_type in sorted(model_type_to_name.keys())
759
+ ]
760
+ else:
761
+ config_to_name = {
762
+ CONFIG_MAPPING_NAMES[config]: _get_class_name(clas)
763
+ for config, clas in config_to_class.items()
764
+ if config in CONFIG_MAPPING_NAMES
765
+ }
766
+ config_to_model_name = {
767
+ config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items()
768
+ }
769
+ lines = [
770
+ f"{indent}- [`{config_name}`] configuration class:"
771
+ f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)"
772
+ for config_name in sorted(config_to_name.keys())
773
+ ]
774
+ return "\n".join(lines)
775
+
776
+
777
+ def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True):
778
+ def docstring_decorator(fn):
779
+ docstrings = fn.__doc__
780
+ if docstrings is None:
781
+ # Example: -OO
782
+ return fn
783
+ lines = docstrings.split("\n")
784
+ i = 0
785
+ while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None:
786
+ i += 1
787
+ if i < len(lines):
788
+ indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0]
789
+ if use_model_types:
790
+ indent = f"{indent} "
791
+ lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types)
792
+ docstrings = "\n".join(lines)
793
+ else:
794
+ raise ValueError(
795
+ f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current"
796
+ f" docstring is:\n{docstrings}"
797
+ )
798
+ fn.__doc__ = docstrings
799
+ return fn
800
+
801
+ return docstring_decorator
802
+
803
+
804
+ class AutoConfig:
805
+ r"""
806
+ This is a generic configuration class that will be instantiated as one of the configuration classes of the library
807
+ when created with the [`~AutoConfig.from_pretrained`] class method.
808
+
809
+ This class cannot be instantiated directly using `__init__()` (throws an error).
810
+ """
811
+
812
+ def __init__(self):
813
+ raise EnvironmentError(
814
+ "AutoConfig is designed to be instantiated "
815
+ "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method."
816
+ )
817
+
818
+ @classmethod
819
+ def for_model(cls, model_type: str, *args, **kwargs):
820
+ if model_type in CONFIG_MAPPING:
821
+ config_class = CONFIG_MAPPING[model_type]
822
+ return config_class(*args, **kwargs)
823
+ raise ValueError(
824
+ f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}"
825
+ )
826
+
827
+ @classmethod
828
+ @replace_list_option_in_docstrings()
829
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
830
+ r"""
831
+ Instantiate one of the configuration classes of the library from a pretrained model configuration.
832
+
833
+ The configuration class to instantiate is selected based on the `model_type` property of the config object that
834
+ is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
835
+
836
+ List options
837
+
838
+ Args:
839
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
840
+ Can be either:
841
+
842
+ - A string, the *model id* of a pretrained model configuration hosted inside a model repo on
843
+ huggingface.co.
844
+ - A path to a *directory* containing a configuration file saved using the
845
+ [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method,
846
+ e.g., `./my_model_directory/`.
847
+ - A path or url to a saved configuration JSON *file*, e.g.,
848
+ `./my_model_directory/configuration.json`.
849
+ cache_dir (`str` or `os.PathLike`, *optional*):
850
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
851
+ standard cache should not be used.
852
+ force_download (`bool`, *optional*, defaults to `False`):
853
+ Whether or not to force the (re-)download the model weights and configuration files and override the
854
+ cached versions if they exist.
855
+ resume_download (`bool`, *optional*, defaults to `False`):
856
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
857
+ file exists.
858
+ proxies (`Dict[str, str]`, *optional*):
859
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
860
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
861
+ revision (`str`, *optional*, defaults to `"main"`):
862
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
863
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
864
+ identifier allowed by git.
865
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
866
+ If `False`, then this function returns just the final configuration object.
867
+
868
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
869
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
870
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
871
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
872
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
873
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
874
+ execute code present on the Hub on your local machine.
875
+ kwargs(additional keyword arguments, *optional*):
876
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
877
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
878
+ by the `return_unused_kwargs` keyword parameter.
879
+
880
+ Examples:
881
+
882
+ ```python
883
+ >>> from transformers import AutoConfig
884
+
885
+ >>> # Download configuration from huggingface.co and cache.
886
+ >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
887
+
888
+ >>> # Download configuration from huggingface.co (user-uploaded) and cache.
889
+ >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased")
890
+
891
+ >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*).
892
+ >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/")
893
+
894
+ >>> # Load a specific configuration file.
895
+ >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json")
896
+
897
+ >>> # Change some config attributes when loading a pretrained config.
898
+ >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False)
899
+ >>> config.output_attentions
900
+ True
901
+
902
+ >>> config, unused_kwargs = AutoConfig.from_pretrained(
903
+ ... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True
904
+ ... )
905
+ >>> config.output_attentions
906
+ True
907
+
908
+ >>> unused_kwargs
909
+ {'foo': False}
910
+ ```"""
911
+ use_auth_token = kwargs.pop("use_auth_token", None)
912
+ if use_auth_token is not None:
913
+ warnings.warn(
914
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
915
+ FutureWarning,
916
+ )
917
+ if kwargs.get("token", None) is not None:
918
+ raise ValueError(
919
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
920
+ )
921
+ kwargs["token"] = use_auth_token
922
+
923
+ kwargs["_from_auto"] = True
924
+ kwargs["name_or_path"] = pretrained_model_name_or_path
925
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
926
+ code_revision = kwargs.pop("code_revision", None)
927
+
928
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
929
+ has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"]
930
+ has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING
931
+ trust_remote_code = resolve_trust_remote_code(
932
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
933
+ )
934
+
935
+ if has_remote_code and trust_remote_code:
936
+ class_ref = config_dict["auto_map"]["AutoConfig"]
937
+ config_class = get_class_from_dynamic_module(
938
+ class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs
939
+ )
940
+ if os.path.isdir(pretrained_model_name_or_path):
941
+ config_class.register_for_auto_class()
942
+ return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs)
943
+ elif "model_type" in config_dict:
944
+ try:
945
+ config_class = CONFIG_MAPPING[config_dict["model_type"]]
946
+ except KeyError:
947
+ raise ValueError(
948
+ f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` "
949
+ "but Transformers does not recognize this architecture. This could be because of an "
950
+ "issue with the checkpoint, or because your version of Transformers is out of date."
951
+ )
952
+ return config_class.from_dict(config_dict, **unused_kwargs)
953
+ else:
954
+ # Fallback: use pattern matching on the string.
955
+ # We go from longer names to shorter names to catch roberta before bert (for instance)
956
+ for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True):
957
+ if pattern in str(pretrained_model_name_or_path):
958
+ return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs)
959
+
960
+ raise ValueError(
961
+ f"Unrecognized model in {pretrained_model_name_or_path}. "
962
+ f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings "
963
+ f"in its name: {', '.join(CONFIG_MAPPING.keys())}"
964
+ )
965
+
966
+ @staticmethod
967
+ def register(model_type, config, exist_ok=False):
968
+ """
969
+ Register a new configuration for this class.
970
+
971
+ Args:
972
+ model_type (`str`): The model type like "bert" or "gpt".
973
+ config ([`PretrainedConfig`]): The config to register.
974
+ """
975
+ if issubclass(config, PretrainedConfig) and config.model_type != model_type:
976
+ raise ValueError(
977
+ "The config you are passing has a `model_type` attribute that is not consistent with the model type "
978
+ f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they "
979
+ "match!"
980
+ )
981
+ CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
982
+
983
+
984
+ ALL_PRETRAINED_CONFIG_ARCHIVE_MAP = _LazyLoadAllMappings(CONFIG_ARCHIVE_MAP_MAPPING_NAMES)
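
A short illustration of the helpers defined in this file (`model_type_to_module_name`, `CONFIG_MAPPING`, `AutoConfig.for_model`); the expected values in the comments follow directly from the tables above rather than from any external source.

```python
from transformers import AutoConfig
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, model_type_to_module_name

# "-" becomes "_", special model types are remapped, and deprecated ones gain a "deprecated." prefix.
print(model_type_to_module_name("xlm-roberta"))  # xlm_roberta
print(model_type_to_module_name("openai-gpt"))   # openai (see SPECIAL_MODEL_TYPE_TO_MODULE_NAME)
print(model_type_to_module_name("transfo-xl"))   # deprecated.transfo_xl (see DEPRECATED_MODELS)

# CONFIG_MAPPING lazily imports the config class the first time a model type is looked up.
config_cls = CONFIG_MAPPING["bert"]              # transformers.BertConfig
config = AutoConfig.for_model("bert", hidden_size=128)
print(type(config).__name__, config.hidden_size)  # BertConfig 128
```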
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/feature_extraction_auto.py ADDED
@@ -0,0 +1,396 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AutoFeatureExtractor class."""
16
+ import importlib
17
+ import json
18
+ import os
19
+ import warnings
20
+ from collections import OrderedDict
21
+ from typing import Dict, Optional, Union
22
+
23
+ # Build the list of all feature extractors
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...feature_extraction_utils import FeatureExtractionMixin
27
+ from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
28
+ from .auto_factory import _LazyAutoMapping
29
+ from .configuration_auto import (
30
+ CONFIG_MAPPING_NAMES,
31
+ AutoConfig,
32
+ model_type_to_module_name,
33
+ replace_list_option_in_docstrings,
34
+ )
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
40
+ [
41
+ ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
42
+ ("beit", "BeitFeatureExtractor"),
43
+ ("chinese_clip", "ChineseCLIPFeatureExtractor"),
44
+ ("clap", "ClapFeatureExtractor"),
45
+ ("clip", "CLIPFeatureExtractor"),
46
+ ("clipseg", "ViTFeatureExtractor"),
47
+ ("clvp", "ClvpFeatureExtractor"),
48
+ ("conditional_detr", "ConditionalDetrFeatureExtractor"),
49
+ ("convnext", "ConvNextFeatureExtractor"),
50
+ ("cvt", "ConvNextFeatureExtractor"),
51
+ ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
52
+ ("data2vec-vision", "BeitFeatureExtractor"),
53
+ ("deformable_detr", "DeformableDetrFeatureExtractor"),
54
+ ("deit", "DeiTFeatureExtractor"),
55
+ ("detr", "DetrFeatureExtractor"),
56
+ ("dinat", "ViTFeatureExtractor"),
57
+ ("donut-swin", "DonutFeatureExtractor"),
58
+ ("dpt", "DPTFeatureExtractor"),
59
+ ("encodec", "EncodecFeatureExtractor"),
60
+ ("flava", "FlavaFeatureExtractor"),
61
+ ("glpn", "GLPNFeatureExtractor"),
62
+ ("groupvit", "CLIPFeatureExtractor"),
63
+ ("hubert", "Wav2Vec2FeatureExtractor"),
64
+ ("imagegpt", "ImageGPTFeatureExtractor"),
65
+ ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
66
+ ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
67
+ ("levit", "LevitFeatureExtractor"),
68
+ ("maskformer", "MaskFormerFeatureExtractor"),
69
+ ("mctct", "MCTCTFeatureExtractor"),
70
+ ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
71
+ ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
72
+ ("mobilevit", "MobileViTFeatureExtractor"),
73
+ ("nat", "ViTFeatureExtractor"),
74
+ ("owlvit", "OwlViTFeatureExtractor"),
75
+ ("perceiver", "PerceiverFeatureExtractor"),
76
+ ("poolformer", "PoolFormerFeatureExtractor"),
77
+ ("pop2piano", "Pop2PianoFeatureExtractor"),
78
+ ("regnet", "ConvNextFeatureExtractor"),
79
+ ("resnet", "ConvNextFeatureExtractor"),
80
+ ("seamless_m4t", "SeamlessM4TFeatureExtractor"),
81
+ ("seamless_m4t_v2", "SeamlessM4TFeatureExtractor"),
82
+ ("segformer", "SegformerFeatureExtractor"),
83
+ ("sew", "Wav2Vec2FeatureExtractor"),
84
+ ("sew-d", "Wav2Vec2FeatureExtractor"),
85
+ ("speech_to_text", "Speech2TextFeatureExtractor"),
86
+ ("speecht5", "SpeechT5FeatureExtractor"),
87
+ ("swiftformer", "ViTFeatureExtractor"),
88
+ ("swin", "ViTFeatureExtractor"),
89
+ ("swinv2", "ViTFeatureExtractor"),
90
+ ("table-transformer", "DetrFeatureExtractor"),
91
+ ("timesformer", "VideoMAEFeatureExtractor"),
92
+ ("tvlt", "TvltFeatureExtractor"),
93
+ ("unispeech", "Wav2Vec2FeatureExtractor"),
94
+ ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
95
+ ("univnet", "UnivNetFeatureExtractor"),
96
+ ("van", "ConvNextFeatureExtractor"),
97
+ ("videomae", "VideoMAEFeatureExtractor"),
98
+ ("vilt", "ViltFeatureExtractor"),
99
+ ("vit", "ViTFeatureExtractor"),
100
+ ("vit_mae", "ViTFeatureExtractor"),
101
+ ("vit_msn", "ViTFeatureExtractor"),
102
+ ("wav2vec2", "Wav2Vec2FeatureExtractor"),
103
+ ("wav2vec2-bert", "Wav2Vec2FeatureExtractor"),
104
+ ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
105
+ ("wavlm", "Wav2Vec2FeatureExtractor"),
106
+ ("whisper", "WhisperFeatureExtractor"),
107
+ ("xclip", "CLIPFeatureExtractor"),
108
+ ("yolos", "YolosFeatureExtractor"),
109
+ ]
110
+ )
111
+
112
+ FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
113
+
114
+
115
+ def feature_extractor_class_from_name(class_name: str):
116
+ for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
117
+ if class_name in extractors:
118
+ module_name = model_type_to_module_name(module_name)
119
+
120
+ module = importlib.import_module(f".{module_name}", "transformers.models")
121
+ try:
122
+ return getattr(module, class_name)
123
+ except AttributeError:
124
+ continue
125
+
126
+ for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
127
+ if getattr(extractor, "__name__", None) == class_name:
128
+ return extractor
129
+
130
+ # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
131
+ # init and we return the proper dummy to get an appropriate error message.
132
+ main_module = importlib.import_module("transformers")
133
+ if hasattr(main_module, class_name):
134
+ return getattr(main_module, class_name)
135
+
136
+ return None
137
+
138
+
139
+ def get_feature_extractor_config(
140
+ pretrained_model_name_or_path: Union[str, os.PathLike],
141
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
142
+ force_download: bool = False,
143
+ resume_download: bool = False,
144
+ proxies: Optional[Dict[str, str]] = None,
145
+ token: Optional[Union[bool, str]] = None,
146
+ revision: Optional[str] = None,
147
+ local_files_only: bool = False,
148
+ **kwargs,
149
+ ):
150
+ """
151
+ Loads the feature extractor configuration from a pretrained model feature extractor configuration.
152
+
153
+ Args:
154
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
155
+ This can be either:
156
+
157
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
158
+ huggingface.co.
159
+ - a path to a *directory* containing a configuration file saved using the
160
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
161
+
162
+ cache_dir (`str` or `os.PathLike`, *optional*):
163
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
164
+ cache should not be used.
165
+ force_download (`bool`, *optional*, defaults to `False`):
166
+ Whether or not to force the (re-)download of the configuration files and override the cached versions if they
167
+ exist.
168
+ resume_download (`bool`, *optional*, defaults to `False`):
169
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
170
+ proxies (`Dict[str, str]`, *optional*):
171
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
172
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
173
+ token (`str` or *bool*, *optional*):
174
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
175
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
176
+ revision (`str`, *optional*, defaults to `"main"`):
177
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
178
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
179
+ identifier allowed by git.
180
+ local_files_only (`bool`, *optional*, defaults to `False`):
181
+ If `True`, will only try to load the feature extractor configuration from local files.
182
+
183
+ <Tip>
184
+
185
+ Passing `token=True` is required when you want to use a private model.
186
+
187
+ </Tip>
188
+
189
+ Returns:
190
+ `Dict`: The configuration of the feature extractor.
191
+
192
+ Examples:
193
+
194
+ ```python
195
+ # Download configuration from huggingface.co and cache.
196
+ feature_extractor_config = get_feature_extractor_config("facebook/wav2vec2-base-960h")
197
+ # This model does not have a feature extractor config so the result will be an empty dict.
198
+ feature_extractor_config = get_feature_extractor_config("google-bert/bert-base-uncased")
199
+
200
+ # Save a pretrained feature extractor locally and you can reload its config
201
+ from transformers import AutoFeatureExtractor
202
+
203
+ feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
204
+ feature_extractor.save_pretrained("feature-extractor-test")
205
+ feature_extractor_config = get_feature_extractor_config("feature-extractor-test")
206
+ ```"""
207
+ use_auth_token = kwargs.pop("use_auth_token", None)
208
+ if use_auth_token is not None:
209
+ warnings.warn(
210
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
211
+ FutureWarning,
212
+ )
213
+ if token is not None:
214
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
215
+ token = use_auth_token
216
+
217
+ resolved_config_file = get_file_from_repo(
218
+ pretrained_model_name_or_path,
219
+ FEATURE_EXTRACTOR_NAME,
220
+ cache_dir=cache_dir,
221
+ force_download=force_download,
222
+ resume_download=resume_download,
223
+ proxies=proxies,
224
+ token=token,
225
+ revision=revision,
226
+ local_files_only=local_files_only,
227
+ )
228
+ if resolved_config_file is None:
229
+ logger.info(
230
+ "Could not locate the feature extractor configuration file, will try to use the model config instead."
231
+ )
232
+ return {}
233
+
234
+ with open(resolved_config_file, encoding="utf-8") as reader:
235
+ return json.load(reader)
236
+
237
+
238
+ class AutoFeatureExtractor:
239
+ r"""
240
+ This is a generic feature extractor class that will be instantiated as one of the feature extractor classes of the
241
+ library when created with the [`AutoFeatureExtractor.from_pretrained`] class method.
242
+
243
+ This class cannot be instantiated directly using `__init__()` (throws an error).
244
+ """
245
+
246
+ def __init__(self):
247
+ raise EnvironmentError(
248
+ "AutoFeatureExtractor is designed to be instantiated "
249
+ "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
250
+ )
251
+
252
+ @classmethod
253
+ @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
254
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
255
+ r"""
256
+ Instantiate one of the feature extractor classes of the library from a pretrained model vocabulary.
257
+
258
+ The feature extractor class to instantiate is selected based on the `model_type` property of the config object
259
+ (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
260
+ missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
261
+
262
+ List options
263
+
264
+ Params:
265
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
266
+ This can be either:
267
+
268
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
269
+ huggingface.co.
270
+ - a path to a *directory* containing a feature extractor file saved using the
271
+ [`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
272
+ `./my_model_directory/`.
273
+ - a path or url to a saved feature extractor JSON *file*, e.g.,
274
+ `./my_model_directory/preprocessor_config.json`.
275
+ cache_dir (`str` or `os.PathLike`, *optional*):
276
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
277
+ standard cache should not be used.
278
+ force_download (`bool`, *optional*, defaults to `False`):
279
+ Whether or not to force a (re-)download of the feature extractor files and override the cached versions
280
+ if they exist.
281
+ resume_download (`bool`, *optional*, defaults to `False`):
282
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
283
+ exists.
284
+ proxies (`Dict[str, str]`, *optional*):
285
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
286
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
287
+ token (`str` or *bool*, *optional*):
288
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
289
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
290
+ revision (`str`, *optional*, defaults to `"main"`):
291
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
292
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
293
+ identifier allowed by git.
294
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
295
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
296
+ function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
297
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
298
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
299
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
300
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
301
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
302
+ execute code present on the Hub on your local machine.
303
+ kwargs (`Dict[str, Any]`, *optional*):
304
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
305
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
306
+ controlled by the `return_unused_kwargs` keyword parameter.
307
+
308
+ <Tip>
309
+
310
+ Passing `token=True` is required when you want to use a private model.
311
+
312
+ </Tip>
313
+
314
+ Examples:
315
+
316
+ ```python
317
+ >>> from transformers import AutoFeatureExtractor
318
+
319
+ >>> # Download feature extractor from huggingface.co and cache.
320
+ >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
321
+
322
+ >>> # If feature extractor files are in a directory (e.g. feature extractor was saved using *save_pretrained('./test/saved_model/')*)
323
+ >>> # feature_extractor = AutoFeatureExtractor.from_pretrained("./test/saved_model/")
324
+ ```"""
325
+ use_auth_token = kwargs.pop("use_auth_token", None)
326
+ if use_auth_token is not None:
327
+ warnings.warn(
328
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
329
+ FutureWarning,
330
+ )
331
+ if kwargs.get("token", None) is not None:
332
+ raise ValueError(
333
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
334
+ )
335
+ kwargs["token"] = use_auth_token
336
+
337
+ config = kwargs.pop("config", None)
338
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
339
+ kwargs["_from_auto"] = True
340
+
341
+ config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
342
+ feature_extractor_class = config_dict.get("feature_extractor_type", None)
343
+ feature_extractor_auto_map = None
344
+ if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
345
+ feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
346
+
347
+ # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
348
+ if feature_extractor_class is None and feature_extractor_auto_map is None:
349
+ if not isinstance(config, PretrainedConfig):
350
+ config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
351
+ # It could be in `config.feature_extractor_type``
352
+ feature_extractor_class = getattr(config, "feature_extractor_type", None)
353
+ if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
354
+ feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]
355
+
356
+ if feature_extractor_class is not None:
357
+ feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)
358
+
359
+ has_remote_code = feature_extractor_auto_map is not None
360
+ has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
361
+ trust_remote_code = resolve_trust_remote_code(
362
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
363
+ )
364
+
365
+ if has_remote_code and trust_remote_code:
366
+ feature_extractor_class = get_class_from_dynamic_module(
367
+ feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
368
+ )
369
+ _ = kwargs.pop("code_revision", None)
370
+ if os.path.isdir(pretrained_model_name_or_path):
371
+ feature_extractor_class.register_for_auto_class()
372
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
373
+ elif feature_extractor_class is not None:
374
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
375
+ # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
376
+ elif type(config) in FEATURE_EXTRACTOR_MAPPING:
377
+ feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
378
+ return feature_extractor_class.from_dict(config_dict, **kwargs)
379
+
380
+ raise ValueError(
381
+ f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
382
+ f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} or {CONFIG_NAME}, or one of the following "
383
+ f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
384
+ )
385
+
386
+ @staticmethod
387
+ def register(config_class, feature_extractor_class, exist_ok=False):
388
+ """
389
+ Register a new feature extractor for this class.
390
+
391
+ Args:
392
+ config_class ([`PretrainedConfig`]):
393
+ The configuration corresponding to the model to register.
394
+ feature_extractor_class ([`FeatureExtractorMixin`]): The feature extractor to register.
395
+ """
396
+ FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class, exist_ok=exist_ok)
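For reference, a minimal usage sketch of the `AutoFeatureExtractor` / `get_feature_extractor_config` API defined in the file above. The checkpoint name comes from the docstring example; `MyConfig` and `MyFeatureExtractor` in the trailing comment are hypothetical placeholders.

```python
from transformers import AutoFeatureExtractor
from transformers.models.auto.feature_extraction_auto import get_feature_extractor_config

# Resolve and instantiate the concrete feature extractor for a checkpoint
# (the "wav2vec2" entry in FEATURE_EXTRACTOR_MAPPING_NAMES maps to Wav2Vec2FeatureExtractor).
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# Load the raw preprocessor_config.json as a plain dict (empty dict if the repo has none).
config_dict = get_feature_extractor_config("facebook/wav2vec2-base-960h")
print(type(feature_extractor).__name__, config_dict.get("feature_extractor_type"))

# Registering a custom (config, feature extractor) pair; MyConfig and
# MyFeatureExtractor are hypothetical placeholder classes.
# AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)
```

Because `FEATURE_EXTRACTOR_MAPPING` is a `_LazyAutoMapping`, the module containing the concrete class is only imported at resolution time.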
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/image_processing_auto.py ADDED
@@ -0,0 +1,437 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AutoImageProcessor class."""
16
+ import importlib
17
+ import json
18
+ import os
19
+ import warnings
20
+ from collections import OrderedDict
21
+ from typing import Dict, Optional, Union
22
+
23
+ # Build the list of all image processors
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...image_processing_utils import ImageProcessingMixin
27
+ from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
28
+ from .auto_factory import _LazyAutoMapping
29
+ from .configuration_auto import (
30
+ CONFIG_MAPPING_NAMES,
31
+ AutoConfig,
32
+ model_type_to_module_name,
33
+ replace_list_option_in_docstrings,
34
+ )
35
+
36
+
37
+ logger = logging.get_logger(__name__)
38
+
39
+ IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
40
+ [
41
+ ("align", "EfficientNetImageProcessor"),
42
+ ("beit", "BeitImageProcessor"),
43
+ ("bit", "BitImageProcessor"),
44
+ ("blip", "BlipImageProcessor"),
45
+ ("blip-2", "BlipImageProcessor"),
46
+ ("bridgetower", "BridgeTowerImageProcessor"),
47
+ ("chinese_clip", "ChineseCLIPImageProcessor"),
48
+ ("clip", "CLIPImageProcessor"),
49
+ ("clipseg", "ViTImageProcessor"),
50
+ ("conditional_detr", "ConditionalDetrImageProcessor"),
51
+ ("convnext", "ConvNextImageProcessor"),
52
+ ("convnextv2", "ConvNextImageProcessor"),
53
+ ("cvt", "ConvNextImageProcessor"),
54
+ ("data2vec-vision", "BeitImageProcessor"),
55
+ ("deformable_detr", "DeformableDetrImageProcessor"),
56
+ ("deit", "DeiTImageProcessor"),
57
+ ("depth_anything", "DPTImageProcessor"),
58
+ ("deta", "DetaImageProcessor"),
59
+ ("detr", "DetrImageProcessor"),
60
+ ("dinat", "ViTImageProcessor"),
61
+ ("dinov2", "BitImageProcessor"),
62
+ ("donut-swin", "DonutImageProcessor"),
63
+ ("dpt", "DPTImageProcessor"),
64
+ ("efficientformer", "EfficientFormerImageProcessor"),
65
+ ("efficientnet", "EfficientNetImageProcessor"),
66
+ ("flava", "FlavaImageProcessor"),
67
+ ("focalnet", "BitImageProcessor"),
68
+ ("fuyu", "FuyuImageProcessor"),
69
+ ("git", "CLIPImageProcessor"),
70
+ ("glpn", "GLPNImageProcessor"),
71
+ ("grounding-dino", "GroundingDinoImageProcessor"),
72
+ ("groupvit", "CLIPImageProcessor"),
73
+ ("idefics", "IdeficsImageProcessor"),
74
+ ("idefics2", "Idefics2ImageProcessor"),
75
+ ("imagegpt", "ImageGPTImageProcessor"),
76
+ ("instructblip", "BlipImageProcessor"),
77
+ ("kosmos-2", "CLIPImageProcessor"),
78
+ ("layoutlmv2", "LayoutLMv2ImageProcessor"),
79
+ ("layoutlmv3", "LayoutLMv3ImageProcessor"),
80
+ ("levit", "LevitImageProcessor"),
81
+ ("llava", "CLIPImageProcessor"),
82
+ ("llava_next", "LlavaNextImageProcessor"),
83
+ ("mask2former", "Mask2FormerImageProcessor"),
84
+ ("maskformer", "MaskFormerImageProcessor"),
85
+ ("mgp-str", "ViTImageProcessor"),
86
+ ("mobilenet_v1", "MobileNetV1ImageProcessor"),
87
+ ("mobilenet_v2", "MobileNetV2ImageProcessor"),
88
+ ("mobilevit", "MobileViTImageProcessor"),
90
+ ("mobilevitv2", "MobileViTImageProcessor"),
91
+ ("nat", "ViTImageProcessor"),
92
+ ("nougat", "NougatImageProcessor"),
93
+ ("oneformer", "OneFormerImageProcessor"),
94
+ ("owlv2", "Owlv2ImageProcessor"),
95
+ ("owlvit", "OwlViTImageProcessor"),
96
+ ("perceiver", "PerceiverImageProcessor"),
97
+ ("pix2struct", "Pix2StructImageProcessor"),
98
+ ("poolformer", "PoolFormerImageProcessor"),
99
+ ("pvt", "PvtImageProcessor"),
100
+ ("pvt_v2", "PvtImageProcessor"),
101
+ ("regnet", "ConvNextImageProcessor"),
102
+ ("resnet", "ConvNextImageProcessor"),
103
+ ("sam", "SamImageProcessor"),
104
+ ("segformer", "SegformerImageProcessor"),
105
+ ("seggpt", "SegGptImageProcessor"),
106
+ ("siglip", "SiglipImageProcessor"),
107
+ ("swiftformer", "ViTImageProcessor"),
108
+ ("swin", "ViTImageProcessor"),
109
+ ("swin2sr", "Swin2SRImageProcessor"),
110
+ ("swinv2", "ViTImageProcessor"),
111
+ ("table-transformer", "DetrImageProcessor"),
112
+ ("timesformer", "VideoMAEImageProcessor"),
113
+ ("tvlt", "TvltImageProcessor"),
114
+ ("tvp", "TvpImageProcessor"),
115
+ ("udop", "LayoutLMv3ImageProcessor"),
116
+ ("upernet", "SegformerImageProcessor"),
117
+ ("van", "ConvNextImageProcessor"),
118
+ ("videomae", "VideoMAEImageProcessor"),
119
+ ("vilt", "ViltImageProcessor"),
120
+ ("vipllava", "CLIPImageProcessor"),
121
+ ("vit", "ViTImageProcessor"),
122
+ ("vit_hybrid", "ViTHybridImageProcessor"),
123
+ ("vit_mae", "ViTImageProcessor"),
124
+ ("vit_msn", "ViTImageProcessor"),
125
+ ("vitmatte", "VitMatteImageProcessor"),
126
+ ("xclip", "CLIPImageProcessor"),
127
+ ("yolos", "YolosImageProcessor"),
128
+ ]
129
+ )
130
+
131
+ IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
132
+
133
+
134
+ def image_processor_class_from_name(class_name: str):
135
+ for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
136
+ if class_name in extractors:
137
+ module_name = model_type_to_module_name(module_name)
138
+
139
+ module = importlib.import_module(f".{module_name}", "transformers.models")
140
+ try:
141
+ return getattr(module, class_name)
142
+ except AttributeError:
143
+ continue
144
+
145
+ for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
146
+ if getattr(extractor, "__name__", None) == class_name:
147
+ return extractor
148
+
149
+ # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
150
+ # init and we return the proper dummy to get an appropriate error message.
151
+ main_module = importlib.import_module("transformers")
152
+ if hasattr(main_module, class_name):
153
+ return getattr(main_module, class_name)
154
+
155
+ return None
156
+
157
+
158
+ def get_image_processor_config(
159
+ pretrained_model_name_or_path: Union[str, os.PathLike],
160
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
161
+ force_download: bool = False,
162
+ resume_download: bool = False,
163
+ proxies: Optional[Dict[str, str]] = None,
164
+ token: Optional[Union[bool, str]] = None,
165
+ revision: Optional[str] = None,
166
+ local_files_only: bool = False,
167
+ **kwargs,
168
+ ):
169
+ """
170
+ Loads the image processor configuration from a pretrained model image processor configuration.
171
+
172
+ Args:
173
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
174
+ This can be either:
175
+
176
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
177
+ huggingface.co.
178
+ - a path to a *directory* containing a configuration file saved using the
179
+ [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g., `./my_model_directory/`.
180
+
181
+ cache_dir (`str` or `os.PathLike`, *optional*):
182
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
183
+ cache should not be used.
184
+ force_download (`bool`, *optional*, defaults to `False`):
185
+ Whether or not to force a (re-)download of the configuration files and override the cached versions if they
186
+ exist.
187
+ resume_download (`bool`, *optional*, defaults to `False`):
188
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
189
+ proxies (`Dict[str, str]`, *optional*):
190
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
191
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
192
+ token (`str` or *bool*, *optional*):
193
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
194
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
195
+ revision (`str`, *optional*, defaults to `"main"`):
196
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
197
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
198
+ identifier allowed by git.
199
+ local_files_only (`bool`, *optional*, defaults to `False`):
200
+ If `True`, will only try to load the image processor configuration from local files.
201
+
202
+ <Tip>
203
+
204
+ Passing `token=True` is required when you want to use a private model.
205
+
206
+ </Tip>
207
+
208
+ Returns:
209
+ `Dict`: The configuration of the image processor.
210
+
211
+ Examples:
212
+
213
+ ```python
214
+ # Download configuration from huggingface.co and cache.
215
+ image_processor_config = get_image_processor_config("google/vit-base-patch16-224-in21k")
216
+ # This model does not have an image processor config so the result will be an empty dict.
217
+ image_processor_config = get_image_processor_config("FacebookAI/xlm-roberta-base")
218
+
219
+ # Save a pretrained image processor locally and you can reload its config
220
+ from transformers import AutoImageProcessor
221
+
222
+ image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
223
+ image_processor.save_pretrained("image-processor-test")
224
+ image_processor_config = get_image_processor_config("image-processor-test")
225
+ ```"""
226
+ use_auth_token = kwargs.pop("use_auth_token", None)
227
+ if use_auth_token is not None:
228
+ warnings.warn(
229
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
230
+ FutureWarning,
231
+ )
232
+ if token is not None:
233
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
234
+ token = use_auth_token
235
+
236
+ resolved_config_file = get_file_from_repo(
237
+ pretrained_model_name_or_path,
238
+ IMAGE_PROCESSOR_NAME,
239
+ cache_dir=cache_dir,
240
+ force_download=force_download,
241
+ resume_download=resume_download,
242
+ proxies=proxies,
243
+ token=token,
244
+ revision=revision,
245
+ local_files_only=local_files_only,
246
+ )
247
+ if resolved_config_file is None:
248
+ logger.info(
249
+ "Could not locate the image processor configuration file, will try to use the model config instead."
250
+ )
251
+ return {}
252
+
253
+ with open(resolved_config_file, encoding="utf-8") as reader:
254
+ return json.load(reader)
255
+
256
+
257
+ class AutoImageProcessor:
258
+ r"""
259
+ This is a generic image processor class that will be instantiated as one of the image processor classes of the
260
+ library when created with the [`AutoImageProcessor.from_pretrained`] class method.
261
+
262
+ This class cannot be instantiated directly using `__init__()` (throws an error).
263
+ """
264
+
265
+ def __init__(self):
266
+ raise EnvironmentError(
267
+ "AutoImageProcessor is designed to be instantiated "
268
+ "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
269
+ )
270
+
271
+ @classmethod
272
+ @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
273
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
274
+ r"""
275
+ Instantiate one of the image processor classes of the library from a pretrained model vocabulary.
276
+
277
+ The image processor class to instantiate is selected based on the `model_type` property of the config object
278
+ (either passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's
279
+ missing, by falling back to using pattern matching on `pretrained_model_name_or_path`:
280
+
281
+ List options
282
+
283
+ Params:
284
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
285
+ This can be either:
286
+
287
+ - a string, the *model id* of a pretrained image_processor hosted inside a model repo on
288
+ huggingface.co.
289
+ - a path to a *directory* containing an image processor file saved using the
290
+ [`~image_processing_utils.ImageProcessingMixin.save_pretrained`] method, e.g.,
291
+ `./my_model_directory/`.
292
+ - a path or url to a saved image processor JSON *file*, e.g.,
293
+ `./my_model_directory/preprocessor_config.json`.
294
+ cache_dir (`str` or `os.PathLike`, *optional*):
295
+ Path to a directory in which a downloaded pretrained model image processor should be cached if the
296
+ standard cache should not be used.
297
+ force_download (`bool`, *optional*, defaults to `False`):
298
+ Whether or not to force a (re-)download of the image processor files and override the cached versions if
299
+ they exist.
300
+ resume_download (`bool`, *optional*, defaults to `False`):
301
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
302
+ exists.
303
+ proxies (`Dict[str, str]`, *optional*):
304
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
305
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
306
+ token (`str` or *bool*, *optional*):
307
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
308
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
309
+ revision (`str`, *optional*, defaults to `"main"`):
310
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
311
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
312
+ identifier allowed by git.
313
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
314
+ If `False`, then this function returns just the final image processor object. If `True`, then this
315
+ function returns a `Tuple(image_processor, unused_kwargs)` where *unused_kwargs* is a dictionary
316
+ consisting of the key/value pairs whose keys are not image processor attributes: i.e., the part of
317
+ `kwargs` which has not been used to update `image_processor` and is otherwise ignored.
318
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
319
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
320
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
321
+ execute code present on the Hub on your local machine.
322
+ kwargs (`Dict[str, Any]`, *optional*):
323
+ The values in kwargs of any keys which are image processor attributes will be used to override the
324
+ loaded values. Behavior concerning key/value pairs whose keys are *not* image processor attributes is
325
+ controlled by the `return_unused_kwargs` keyword parameter.
326
+
327
+ <Tip>
328
+
329
+ Passing `token=True` is required when you want to use a private model.
330
+
331
+ </Tip>
332
+
333
+ Examples:
334
+
335
+ ```python
336
+ >>> from transformers import AutoImageProcessor
337
+
338
+ >>> # Download image processor from huggingface.co and cache.
339
+ >>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")
340
+
341
+ >>> # If image processor files are in a directory (e.g. image processor was saved using *save_pretrained('./test/saved_model/')*)
342
+ >>> # image_processor = AutoImageProcessor.from_pretrained("./test/saved_model/")
343
+ ```"""
344
+ use_auth_token = kwargs.pop("use_auth_token", None)
345
+ if use_auth_token is not None:
346
+ warnings.warn(
347
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
348
+ FutureWarning,
349
+ )
350
+ if kwargs.get("token", None) is not None:
351
+ raise ValueError(
352
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
353
+ )
354
+ kwargs["token"] = use_auth_token
355
+
356
+ config = kwargs.pop("config", None)
357
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
358
+ kwargs["_from_auto"] = True
359
+
360
+ config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
361
+ image_processor_class = config_dict.get("image_processor_type", None)
362
+ image_processor_auto_map = None
363
+ if "AutoImageProcessor" in config_dict.get("auto_map", {}):
364
+ image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]
365
+
366
+ # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
367
+ # and if so, infer the image processor class from there.
368
+ if image_processor_class is None and image_processor_auto_map is None:
369
+ feature_extractor_class = config_dict.pop("feature_extractor_type", None)
370
+ if feature_extractor_class is not None:
371
+ logger.warning(
372
+ "Could not find image processor class in the image processor config or the model config. Loading "
373
+ "based on pattern matching with the model's feature extractor configuration. Please open a "
374
+ "PR/issue to update `preprocessor_config.json` to use `image_processor_type` instead of "
375
+ "`feature_extractor_type`. This warning will be removed in v4.40."
376
+ )
377
+ image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
378
+ if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
379
+ feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
380
+ image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
381
+ logger.warning(
382
+ "Could not find image processor auto map in the image processor config or the model config. "
383
+ "Loading based on pattern matching with the model's feature extractor configuration. Please open a "
384
+ "PR/issue to update `preprocessor_config.json` to use `AutoImageProcessor` instead of "
385
+ "`AutoFeatureExtractor`. This warning will be removed in v4.40."
386
+ )
387
+
388
+ # If we don't find the image processor class in the image processor config, let's try the model config.
389
+ if image_processor_class is None and image_processor_auto_map is None:
390
+ if not isinstance(config, PretrainedConfig):
391
+ config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
392
+ # It could be in `config.image_processor_type``
393
+ image_processor_class = getattr(config, "image_processor_type", None)
394
+ if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
395
+ image_processor_auto_map = config.auto_map["AutoImageProcessor"]
396
+
397
+ if image_processor_class is not None:
398
+ image_processor_class = image_processor_class_from_name(image_processor_class)
399
+
400
+ has_remote_code = image_processor_auto_map is not None
401
+ has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
402
+ trust_remote_code = resolve_trust_remote_code(
403
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
404
+ )
405
+
406
+ if has_remote_code and trust_remote_code:
407
+ image_processor_class = get_class_from_dynamic_module(
408
+ image_processor_auto_map, pretrained_model_name_or_path, **kwargs
409
+ )
410
+ _ = kwargs.pop("code_revision", None)
411
+ if os.path.isdir(pretrained_model_name_or_path):
412
+ image_processor_class.register_for_auto_class()
413
+ return image_processor_class.from_dict(config_dict, **kwargs)
414
+ elif image_processor_class is not None:
415
+ return image_processor_class.from_dict(config_dict, **kwargs)
416
+ # Last try: we use the IMAGE_PROCESSOR_MAPPING.
417
+ elif type(config) in IMAGE_PROCESSOR_MAPPING:
418
+ image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
419
+ return image_processor_class.from_dict(config_dict, **kwargs)
420
+
421
+ raise ValueError(
422
+ f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
423
+ f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} or {CONFIG_NAME}, or one of the following "
424
+ f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
425
+ )
426
+
427
+ @staticmethod
428
+ def register(config_class, image_processor_class, exist_ok=False):
429
+ """
430
+ Register a new image processor for this class.
431
+
432
+ Args:
433
+ config_class ([`PretrainedConfig`]):
434
+ The configuration corresponding to the model to register.
435
+ image_processor_class ([`ImageProcessingMixin`]): The image processor to register.
436
+ """
437
+ IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class, exist_ok=exist_ok)
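Likewise, a minimal sketch of the `AutoImageProcessor` API defined above, reusing the checkpoint from its docstring example; repos that still declare only `feature_extractor_type` are handled by the fallback branch in `from_pretrained` shown above.

```python
from transformers import AutoImageProcessor
from transformers.models.auto.image_processing_auto import get_image_processor_config

# "vit" in IMAGE_PROCESSOR_MAPPING_NAMES maps to ViTImageProcessor, so this returns
# a ViTImageProcessor instance.
image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k")

# The raw preprocessor_config.json as a plain dict (empty dict if the repo has none).
config_dict = get_image_processor_config("google/vit-base-patch16-224-in21k")
print(type(image_processor).__name__, sorted(config_dict)[:3])
```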
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_auto.py ADDED
@@ -0,0 +1,1705 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Model class."""
16
+
17
+ import warnings
18
+ from collections import OrderedDict
19
+
20
+ from ...utils import logging
21
+ from .auto_factory import (
22
+ _BaseAutoBackboneClass,
23
+ _BaseAutoModelClass,
24
+ _LazyAutoMapping,
25
+ auto_class_update,
26
+ )
27
+ from .configuration_auto import CONFIG_MAPPING_NAMES
28
+
29
+
30
+ logger = logging.get_logger(__name__)
31
+
32
+ MODEL_MAPPING_NAMES = OrderedDict(
33
+ [
34
+ # Base model mapping
35
+ ("albert", "AlbertModel"),
36
+ ("align", "AlignModel"),
37
+ ("altclip", "AltCLIPModel"),
38
+ ("audio-spectrogram-transformer", "ASTModel"),
39
+ ("autoformer", "AutoformerModel"),
40
+ ("bark", "BarkModel"),
41
+ ("bart", "BartModel"),
42
+ ("beit", "BeitModel"),
43
+ ("bert", "BertModel"),
44
+ ("bert-generation", "BertGenerationEncoder"),
45
+ ("big_bird", "BigBirdModel"),
46
+ ("bigbird_pegasus", "BigBirdPegasusModel"),
47
+ ("biogpt", "BioGptModel"),
48
+ ("bit", "BitModel"),
49
+ ("blenderbot", "BlenderbotModel"),
50
+ ("blenderbot-small", "BlenderbotSmallModel"),
51
+ ("blip", "BlipModel"),
52
+ ("blip-2", "Blip2Model"),
53
+ ("bloom", "BloomModel"),
54
+ ("bridgetower", "BridgeTowerModel"),
55
+ ("bros", "BrosModel"),
56
+ ("camembert", "CamembertModel"),
57
+ ("canine", "CanineModel"),
58
+ ("chinese_clip", "ChineseCLIPModel"),
59
+ ("chinese_clip_vision_model", "ChineseCLIPVisionModel"),
60
+ ("clap", "ClapModel"),
61
+ ("clip", "CLIPModel"),
62
+ ("clip_vision_model", "CLIPVisionModel"),
63
+ ("clipseg", "CLIPSegModel"),
64
+ ("clvp", "ClvpModelForConditionalGeneration"),
65
+ ("code_llama", "LlamaModel"),
66
+ ("codegen", "CodeGenModel"),
67
+ ("cohere", "CohereModel"),
68
+ ("conditional_detr", "ConditionalDetrModel"),
69
+ ("convbert", "ConvBertModel"),
70
+ ("convnext", "ConvNextModel"),
71
+ ("convnextv2", "ConvNextV2Model"),
72
+ ("cpmant", "CpmAntModel"),
73
+ ("ctrl", "CTRLModel"),
74
+ ("cvt", "CvtModel"),
75
+ ("data2vec-audio", "Data2VecAudioModel"),
76
+ ("data2vec-text", "Data2VecTextModel"),
77
+ ("data2vec-vision", "Data2VecVisionModel"),
78
+ ("dbrx", "DbrxModel"),
79
+ ("deberta", "DebertaModel"),
80
+ ("deberta-v2", "DebertaV2Model"),
81
+ ("decision_transformer", "DecisionTransformerModel"),
82
+ ("deformable_detr", "DeformableDetrModel"),
83
+ ("deit", "DeiTModel"),
84
+ ("deta", "DetaModel"),
85
+ ("detr", "DetrModel"),
86
+ ("dinat", "DinatModel"),
87
+ ("dinov2", "Dinov2Model"),
88
+ ("distilbert", "DistilBertModel"),
89
+ ("donut-swin", "DonutSwinModel"),
90
+ ("dpr", "DPRQuestionEncoder"),
91
+ ("dpt", "DPTModel"),
92
+ ("efficientformer", "EfficientFormerModel"),
93
+ ("efficientnet", "EfficientNetModel"),
94
+ ("electra", "ElectraModel"),
95
+ ("encodec", "EncodecModel"),
96
+ ("ernie", "ErnieModel"),
97
+ ("ernie_m", "ErnieMModel"),
98
+ ("esm", "EsmModel"),
99
+ ("falcon", "FalconModel"),
100
+ ("fastspeech2_conformer", "FastSpeech2ConformerModel"),
101
+ ("flaubert", "FlaubertModel"),
102
+ ("flava", "FlavaModel"),
103
+ ("fnet", "FNetModel"),
104
+ ("focalnet", "FocalNetModel"),
105
+ ("fsmt", "FSMTModel"),
106
+ ("funnel", ("FunnelModel", "FunnelBaseModel")),
107
+ ("gemma", "GemmaModel"),
108
+ ("git", "GitModel"),
109
+ ("glpn", "GLPNModel"),
110
+ ("gpt-sw3", "GPT2Model"),
111
+ ("gpt2", "GPT2Model"),
112
+ ("gpt_bigcode", "GPTBigCodeModel"),
113
+ ("gpt_neo", "GPTNeoModel"),
114
+ ("gpt_neox", "GPTNeoXModel"),
115
+ ("gpt_neox_japanese", "GPTNeoXJapaneseModel"),
116
+ ("gptj", "GPTJModel"),
117
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
118
+ ("graphormer", "GraphormerModel"),
119
+ ("grounding-dino", "GroundingDinoModel"),
120
+ ("groupvit", "GroupViTModel"),
121
+ ("hubert", "HubertModel"),
122
+ ("ibert", "IBertModel"),
123
+ ("idefics", "IdeficsModel"),
124
+ ("idefics2", "Idefics2Model"),
125
+ ("imagegpt", "ImageGPTModel"),
126
+ ("informer", "InformerModel"),
127
+ ("jamba", "JambaModel"),
128
+ ("jukebox", "JukeboxModel"),
129
+ ("kosmos-2", "Kosmos2Model"),
130
+ ("layoutlm", "LayoutLMModel"),
131
+ ("layoutlmv2", "LayoutLMv2Model"),
132
+ ("layoutlmv3", "LayoutLMv3Model"),
133
+ ("led", "LEDModel"),
134
+ ("levit", "LevitModel"),
135
+ ("lilt", "LiltModel"),
136
+ ("llama", "LlamaModel"),
137
+ ("longformer", "LongformerModel"),
138
+ ("longt5", "LongT5Model"),
139
+ ("luke", "LukeModel"),
140
+ ("lxmert", "LxmertModel"),
141
+ ("m2m_100", "M2M100Model"),
142
+ ("mamba", "MambaModel"),
143
+ ("marian", "MarianModel"),
144
+ ("markuplm", "MarkupLMModel"),
145
+ ("mask2former", "Mask2FormerModel"),
146
+ ("maskformer", "MaskFormerModel"),
147
+ ("maskformer-swin", "MaskFormerSwinModel"),
148
+ ("mbart", "MBartModel"),
149
+ ("mctct", "MCTCTModel"),
150
+ ("mega", "MegaModel"),
151
+ ("megatron-bert", "MegatronBertModel"),
152
+ ("mgp-str", "MgpstrForSceneTextRecognition"),
153
+ ("mistral", "MistralModel"),
154
+ ("mixtral", "MixtralModel"),
155
+ ("mobilebert", "MobileBertModel"),
156
+ ("mobilenet_v1", "MobileNetV1Model"),
157
+ ("mobilenet_v2", "MobileNetV2Model"),
158
+ ("mobilevit", "MobileViTModel"),
159
+ ("mobilevitv2", "MobileViTV2Model"),
160
+ ("mpnet", "MPNetModel"),
161
+ ("mpt", "MptModel"),
162
+ ("mra", "MraModel"),
163
+ ("mt5", "MT5Model"),
164
+ ("mvp", "MvpModel"),
165
+ ("nat", "NatModel"),
166
+ ("nezha", "NezhaModel"),
167
+ ("nllb-moe", "NllbMoeModel"),
168
+ ("nystromformer", "NystromformerModel"),
169
+ ("olmo", "OlmoModel"),
170
+ ("oneformer", "OneFormerModel"),
171
+ ("open-llama", "OpenLlamaModel"),
172
+ ("openai-gpt", "OpenAIGPTModel"),
173
+ ("opt", "OPTModel"),
174
+ ("owlv2", "Owlv2Model"),
175
+ ("owlvit", "OwlViTModel"),
176
+ ("patchtsmixer", "PatchTSMixerModel"),
177
+ ("patchtst", "PatchTSTModel"),
178
+ ("pegasus", "PegasusModel"),
179
+ ("pegasus_x", "PegasusXModel"),
180
+ ("perceiver", "PerceiverModel"),
181
+ ("persimmon", "PersimmonModel"),
182
+ ("phi", "PhiModel"),
183
+ ("plbart", "PLBartModel"),
184
+ ("poolformer", "PoolFormerModel"),
185
+ ("prophetnet", "ProphetNetModel"),
186
+ ("pvt", "PvtModel"),
187
+ ("pvt_v2", "PvtV2Model"),
188
+ ("qdqbert", "QDQBertModel"),
189
+ ("qwen2", "Qwen2Model"),
190
+ ("qwen2_moe", "Qwen2MoeModel"),
191
+ ("recurrent_gemma", "RecurrentGemmaModel"),
192
+ ("reformer", "ReformerModel"),
193
+ ("regnet", "RegNetModel"),
194
+ ("rembert", "RemBertModel"),
195
+ ("resnet", "ResNetModel"),
196
+ ("retribert", "RetriBertModel"),
197
+ ("roberta", "RobertaModel"),
198
+ ("roberta-prelayernorm", "RobertaPreLayerNormModel"),
199
+ ("roc_bert", "RoCBertModel"),
200
+ ("roformer", "RoFormerModel"),
201
+ ("rwkv", "RwkvModel"),
202
+ ("sam", "SamModel"),
203
+ ("seamless_m4t", "SeamlessM4TModel"),
204
+ ("seamless_m4t_v2", "SeamlessM4Tv2Model"),
205
+ ("segformer", "SegformerModel"),
206
+ ("seggpt", "SegGptModel"),
207
+ ("sew", "SEWModel"),
208
+ ("sew-d", "SEWDModel"),
209
+ ("siglip", "SiglipModel"),
210
+ ("siglip_vision_model", "SiglipVisionModel"),
211
+ ("speech_to_text", "Speech2TextModel"),
212
+ ("speecht5", "SpeechT5Model"),
213
+ ("splinter", "SplinterModel"),
214
+ ("squeezebert", "SqueezeBertModel"),
215
+ ("stablelm", "StableLmModel"),
216
+ ("starcoder2", "Starcoder2Model"),
217
+ ("swiftformer", "SwiftFormerModel"),
218
+ ("swin", "SwinModel"),
219
+ ("swin2sr", "Swin2SRModel"),
220
+ ("swinv2", "Swinv2Model"),
221
+ ("switch_transformers", "SwitchTransformersModel"),
222
+ ("t5", "T5Model"),
223
+ ("table-transformer", "TableTransformerModel"),
224
+ ("tapas", "TapasModel"),
225
+ ("time_series_transformer", "TimeSeriesTransformerModel"),
226
+ ("timesformer", "TimesformerModel"),
227
+ ("timm_backbone", "TimmBackbone"),
228
+ ("trajectory_transformer", "TrajectoryTransformerModel"),
229
+ ("transfo-xl", "TransfoXLModel"),
230
+ ("tvlt", "TvltModel"),
231
+ ("tvp", "TvpModel"),
232
+ ("udop", "UdopModel"),
233
+ ("umt5", "UMT5Model"),
234
+ ("unispeech", "UniSpeechModel"),
235
+ ("unispeech-sat", "UniSpeechSatModel"),
236
+ ("univnet", "UnivNetModel"),
237
+ ("van", "VanModel"),
238
+ ("videomae", "VideoMAEModel"),
239
+ ("vilt", "ViltModel"),
240
+ ("vision-text-dual-encoder", "VisionTextDualEncoderModel"),
241
+ ("visual_bert", "VisualBertModel"),
242
+ ("vit", "ViTModel"),
243
+ ("vit_hybrid", "ViTHybridModel"),
244
+ ("vit_mae", "ViTMAEModel"),
245
+ ("vit_msn", "ViTMSNModel"),
246
+ ("vitdet", "VitDetModel"),
247
+ ("vits", "VitsModel"),
248
+ ("vivit", "VivitModel"),
249
+ ("wav2vec2", "Wav2Vec2Model"),
250
+ ("wav2vec2-bert", "Wav2Vec2BertModel"),
251
+ ("wav2vec2-conformer", "Wav2Vec2ConformerModel"),
252
+ ("wavlm", "WavLMModel"),
253
+ ("whisper", "WhisperModel"),
254
+ ("xclip", "XCLIPModel"),
255
+ ("xglm", "XGLMModel"),
256
+ ("xlm", "XLMModel"),
257
+ ("xlm-prophetnet", "XLMProphetNetModel"),
258
+ ("xlm-roberta", "XLMRobertaModel"),
259
+ ("xlm-roberta-xl", "XLMRobertaXLModel"),
260
+ ("xlnet", "XLNetModel"),
261
+ ("xmod", "XmodModel"),
262
+ ("yolos", "YolosModel"),
263
+ ("yoso", "YosoModel"),
264
+ ]
265
+ )
266
+
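A minimal sketch of how this base-model mapping is consumed (it assumes the `MODEL_MAPPING` / `AutoModel` definitions that appear further down in this file, outside the hunk shown here): `AutoConfig` reads `model_type` from a checkpoint's `config.json`, and the Auto class looks that type up in the mapping above.

```python
from transformers import AutoConfig, AutoModel

# "bert" in MODEL_MAPPING_NAMES maps to "BertModel", so AutoModel resolves to that class.
config = AutoConfig.from_pretrained("google-bert/bert-base-uncased")
model = AutoModel.from_config(config)  # builds the architecture without downloading weights
print(type(model).__name__)  # BertModel
```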
267
+ MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
268
+ [
269
+ # Model for pre-training mapping
270
+ ("albert", "AlbertForPreTraining"),
271
+ ("bart", "BartForConditionalGeneration"),
272
+ ("bert", "BertForPreTraining"),
273
+ ("big_bird", "BigBirdForPreTraining"),
274
+ ("bloom", "BloomForCausalLM"),
275
+ ("camembert", "CamembertForMaskedLM"),
276
+ ("ctrl", "CTRLLMHeadModel"),
277
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
278
+ ("deberta", "DebertaForMaskedLM"),
279
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
280
+ ("distilbert", "DistilBertForMaskedLM"),
281
+ ("electra", "ElectraForPreTraining"),
282
+ ("ernie", "ErnieForPreTraining"),
283
+ ("flaubert", "FlaubertWithLMHeadModel"),
284
+ ("flava", "FlavaForPreTraining"),
285
+ ("fnet", "FNetForPreTraining"),
286
+ ("fsmt", "FSMTForConditionalGeneration"),
287
+ ("funnel", "FunnelForPreTraining"),
288
+ ("gpt-sw3", "GPT2LMHeadModel"),
289
+ ("gpt2", "GPT2LMHeadModel"),
290
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
291
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
292
+ ("ibert", "IBertForMaskedLM"),
293
+ ("idefics", "IdeficsForVisionText2Text"),
294
+ ("idefics2", "Idefics2ForConditionalGeneration"),
295
+ ("layoutlm", "LayoutLMForMaskedLM"),
296
+ ("llava", "LlavaForConditionalGeneration"),
297
+ ("llava_next", "LlavaNextForConditionalGeneration"),
298
+ ("longformer", "LongformerForMaskedLM"),
299
+ ("luke", "LukeForMaskedLM"),
300
+ ("lxmert", "LxmertForPreTraining"),
301
+ ("mamba", "MambaForCausalLM"),
302
+ ("mega", "MegaForMaskedLM"),
303
+ ("megatron-bert", "MegatronBertForPreTraining"),
304
+ ("mobilebert", "MobileBertForPreTraining"),
305
+ ("mpnet", "MPNetForMaskedLM"),
306
+ ("mpt", "MptForCausalLM"),
307
+ ("mra", "MraForMaskedLM"),
308
+ ("mvp", "MvpForConditionalGeneration"),
309
+ ("nezha", "NezhaForPreTraining"),
310
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
311
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
312
+ ("retribert", "RetriBertModel"),
313
+ ("roberta", "RobertaForMaskedLM"),
314
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
315
+ ("roc_bert", "RoCBertForPreTraining"),
316
+ ("rwkv", "RwkvForCausalLM"),
317
+ ("splinter", "SplinterForPreTraining"),
318
+ ("squeezebert", "SqueezeBertForMaskedLM"),
319
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
320
+ ("t5", "T5ForConditionalGeneration"),
321
+ ("tapas", "TapasForMaskedLM"),
322
+ ("transfo-xl", "TransfoXLLMHeadModel"),
323
+ ("tvlt", "TvltForPreTraining"),
324
+ ("unispeech", "UniSpeechForPreTraining"),
325
+ ("unispeech-sat", "UniSpeechSatForPreTraining"),
326
+ ("videomae", "VideoMAEForPreTraining"),
327
+ ("vipllava", "VipLlavaForConditionalGeneration"),
328
+ ("visual_bert", "VisualBertForPreTraining"),
329
+ ("vit_mae", "ViTMAEForPreTraining"),
330
+ ("wav2vec2", "Wav2Vec2ForPreTraining"),
331
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForPreTraining"),
332
+ ("xlm", "XLMWithLMHeadModel"),
333
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
334
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
335
+ ("xlnet", "XLNetLMHeadModel"),
336
+ ("xmod", "XmodForMaskedLM"),
337
+ ]
338
+ )
339
+
340
+ MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
341
+ [
342
+ # Model with LM heads mapping
343
+ ("albert", "AlbertForMaskedLM"),
344
+ ("bart", "BartForConditionalGeneration"),
345
+ ("bert", "BertForMaskedLM"),
346
+ ("big_bird", "BigBirdForMaskedLM"),
347
+ ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"),
348
+ ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"),
349
+ ("bloom", "BloomForCausalLM"),
350
+ ("camembert", "CamembertForMaskedLM"),
351
+ ("codegen", "CodeGenForCausalLM"),
352
+ ("convbert", "ConvBertForMaskedLM"),
353
+ ("cpmant", "CpmAntForCausalLM"),
354
+ ("ctrl", "CTRLLMHeadModel"),
355
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
356
+ ("deberta", "DebertaForMaskedLM"),
357
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
358
+ ("distilbert", "DistilBertForMaskedLM"),
359
+ ("electra", "ElectraForMaskedLM"),
360
+ ("encoder-decoder", "EncoderDecoderModel"),
361
+ ("ernie", "ErnieForMaskedLM"),
362
+ ("esm", "EsmForMaskedLM"),
363
+ ("flaubert", "FlaubertWithLMHeadModel"),
364
+ ("fnet", "FNetForMaskedLM"),
365
+ ("fsmt", "FSMTForConditionalGeneration"),
366
+ ("funnel", "FunnelForMaskedLM"),
367
+ ("git", "GitForCausalLM"),
368
+ ("gpt-sw3", "GPT2LMHeadModel"),
369
+ ("gpt2", "GPT2LMHeadModel"),
370
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
371
+ ("gpt_neo", "GPTNeoForCausalLM"),
372
+ ("gpt_neox", "GPTNeoXForCausalLM"),
373
+ ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"),
374
+ ("gptj", "GPTJForCausalLM"),
375
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
376
+ ("ibert", "IBertForMaskedLM"),
377
+ ("layoutlm", "LayoutLMForMaskedLM"),
378
+ ("led", "LEDForConditionalGeneration"),
379
+ ("longformer", "LongformerForMaskedLM"),
380
+ ("longt5", "LongT5ForConditionalGeneration"),
381
+ ("luke", "LukeForMaskedLM"),
382
+ ("m2m_100", "M2M100ForConditionalGeneration"),
383
+ ("mamba", "MambaForCausalLM"),
384
+ ("marian", "MarianMTModel"),
385
+ ("mega", "MegaForMaskedLM"),
386
+ ("megatron-bert", "MegatronBertForCausalLM"),
387
+ ("mobilebert", "MobileBertForMaskedLM"),
388
+ ("mpnet", "MPNetForMaskedLM"),
389
+ ("mpt", "MptForCausalLM"),
390
+ ("mra", "MraForMaskedLM"),
391
+ ("mvp", "MvpForConditionalGeneration"),
392
+ ("nezha", "NezhaForMaskedLM"),
393
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
394
+ ("nystromformer", "NystromformerForMaskedLM"),
395
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
396
+ ("pegasus_x", "PegasusXForConditionalGeneration"),
397
+ ("plbart", "PLBartForConditionalGeneration"),
398
+ ("pop2piano", "Pop2PianoForConditionalGeneration"),
399
+ ("qdqbert", "QDQBertForMaskedLM"),
400
+ ("reformer", "ReformerModelWithLMHead"),
401
+ ("rembert", "RemBertForMaskedLM"),
402
+ ("roberta", "RobertaForMaskedLM"),
403
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
404
+ ("roc_bert", "RoCBertForMaskedLM"),
405
+ ("roformer", "RoFormerForMaskedLM"),
406
+ ("rwkv", "RwkvForCausalLM"),
407
+ ("speech_to_text", "Speech2TextForConditionalGeneration"),
408
+ ("squeezebert", "SqueezeBertForMaskedLM"),
409
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
410
+ ("t5", "T5ForConditionalGeneration"),
411
+ ("tapas", "TapasForMaskedLM"),
412
+ ("transfo-xl", "TransfoXLLMHeadModel"),
413
+ ("wav2vec2", "Wav2Vec2ForMaskedLM"),
414
+ ("whisper", "WhisperForConditionalGeneration"),
415
+ ("xlm", "XLMWithLMHeadModel"),
416
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
417
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
418
+ ("xlnet", "XLNetLMHeadModel"),
419
+ ("xmod", "XmodForMaskedLM"),
420
+ ("yoso", "YosoForMaskedLM"),
421
+ ]
422
+ )
423
+
424
+ MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
425
+ [
426
+ # Model for Causal LM mapping
427
+ ("bart", "BartForCausalLM"),
428
+ ("bert", "BertLMHeadModel"),
429
+ ("bert-generation", "BertGenerationDecoder"),
430
+ ("big_bird", "BigBirdForCausalLM"),
431
+ ("bigbird_pegasus", "BigBirdPegasusForCausalLM"),
432
+ ("biogpt", "BioGptForCausalLM"),
433
+ ("blenderbot", "BlenderbotForCausalLM"),
434
+ ("blenderbot-small", "BlenderbotSmallForCausalLM"),
435
+ ("bloom", "BloomForCausalLM"),
436
+ ("camembert", "CamembertForCausalLM"),
437
+ ("code_llama", "LlamaForCausalLM"),
438
+ ("codegen", "CodeGenForCausalLM"),
439
+ ("cohere", "CohereForCausalLM"),
440
+ ("cpmant", "CpmAntForCausalLM"),
441
+ ("ctrl", "CTRLLMHeadModel"),
442
+ ("data2vec-text", "Data2VecTextForCausalLM"),
443
+ ("dbrx", "DbrxForCausalLM"),
444
+ ("electra", "ElectraForCausalLM"),
445
+ ("ernie", "ErnieForCausalLM"),
446
+ ("falcon", "FalconForCausalLM"),
447
+ ("fuyu", "FuyuForCausalLM"),
448
+ ("gemma", "GemmaForCausalLM"),
449
+ ("git", "GitForCausalLM"),
450
+ ("gpt-sw3", "GPT2LMHeadModel"),
451
+ ("gpt2", "GPT2LMHeadModel"),
452
+ ("gpt_bigcode", "GPTBigCodeForCausalLM"),
453
+ ("gpt_neo", "GPTNeoForCausalLM"),
454
+ ("gpt_neox", "GPTNeoXForCausalLM"),
455
+ ("gpt_neox_japanese", "GPTNeoXJapaneseForCausalLM"),
456
+ ("gptj", "GPTJForCausalLM"),
457
+ ("jamba", "JambaForCausalLM"),
458
+ ("llama", "LlamaForCausalLM"),
459
+ ("mamba", "MambaForCausalLM"),
460
+ ("marian", "MarianForCausalLM"),
461
+ ("mbart", "MBartForCausalLM"),
462
+ ("mega", "MegaForCausalLM"),
463
+ ("megatron-bert", "MegatronBertForCausalLM"),
464
+ ("mistral", "MistralForCausalLM"),
465
+ ("mixtral", "MixtralForCausalLM"),
466
+ ("mpt", "MptForCausalLM"),
467
+ ("musicgen", "MusicgenForCausalLM"),
468
+ ("musicgen_melody", "MusicgenMelodyForCausalLM"),
469
+ ("mvp", "MvpForCausalLM"),
470
+ ("olmo", "OlmoForCausalLM"),
471
+ ("open-llama", "OpenLlamaForCausalLM"),
472
+ ("openai-gpt", "OpenAIGPTLMHeadModel"),
473
+ ("opt", "OPTForCausalLM"),
474
+ ("pegasus", "PegasusForCausalLM"),
475
+ ("persimmon", "PersimmonForCausalLM"),
476
+ ("phi", "PhiForCausalLM"),
477
+ ("plbart", "PLBartForCausalLM"),
478
+ ("prophetnet", "ProphetNetForCausalLM"),
479
+ ("qdqbert", "QDQBertLMHeadModel"),
480
+ ("qwen2", "Qwen2ForCausalLM"),
481
+ ("qwen2_moe", "Qwen2MoeForCausalLM"),
482
+ ("recurrent_gemma", "RecurrentGemmaForCausalLM"),
483
+ ("reformer", "ReformerModelWithLMHead"),
484
+ ("rembert", "RemBertForCausalLM"),
485
+ ("roberta", "RobertaForCausalLM"),
486
+ ("roberta-prelayernorm", "RobertaPreLayerNormForCausalLM"),
487
+ ("roc_bert", "RoCBertForCausalLM"),
488
+ ("roformer", "RoFormerForCausalLM"),
489
+ ("rwkv", "RwkvForCausalLM"),
490
+ ("speech_to_text_2", "Speech2Text2ForCausalLM"),
491
+ ("stablelm", "StableLmForCausalLM"),
492
+ ("starcoder2", "Starcoder2ForCausalLM"),
493
+ ("transfo-xl", "TransfoXLLMHeadModel"),
494
+ ("trocr", "TrOCRForCausalLM"),
495
+ ("whisper", "WhisperForCausalLM"),
496
+ ("xglm", "XGLMForCausalLM"),
497
+ ("xlm", "XLMWithLMHeadModel"),
498
+ ("xlm-prophetnet", "XLMProphetNetForCausalLM"),
499
+ ("xlm-roberta", "XLMRobertaForCausalLM"),
500
+ ("xlm-roberta-xl", "XLMRobertaXLForCausalLM"),
501
+ ("xlnet", "XLNetLMHeadModel"),
502
+ ("xmod", "XmodForCausalLM"),
503
+ ]
504
+ )
505
+
506
+ MODEL_FOR_IMAGE_MAPPING_NAMES = OrderedDict(
507
+ [
508
+ # Model for Image mapping
509
+ ("beit", "BeitModel"),
510
+ ("bit", "BitModel"),
511
+ ("conditional_detr", "ConditionalDetrModel"),
512
+ ("convnext", "ConvNextModel"),
513
+ ("convnextv2", "ConvNextV2Model"),
514
+ ("data2vec-vision", "Data2VecVisionModel"),
515
+ ("deformable_detr", "DeformableDetrModel"),
516
+ ("deit", "DeiTModel"),
517
+ ("deta", "DetaModel"),
518
+ ("detr", "DetrModel"),
519
+ ("dinat", "DinatModel"),
520
+ ("dinov2", "Dinov2Model"),
521
+ ("dpt", "DPTModel"),
522
+ ("efficientformer", "EfficientFormerModel"),
523
+ ("efficientnet", "EfficientNetModel"),
524
+ ("focalnet", "FocalNetModel"),
525
+ ("glpn", "GLPNModel"),
526
+ ("imagegpt", "ImageGPTModel"),
527
+ ("levit", "LevitModel"),
528
+ ("mobilenet_v1", "MobileNetV1Model"),
529
+ ("mobilenet_v2", "MobileNetV2Model"),
530
+ ("mobilevit", "MobileViTModel"),
531
+ ("mobilevitv2", "MobileViTV2Model"),
532
+ ("nat", "NatModel"),
533
+ ("poolformer", "PoolFormerModel"),
534
+ ("pvt", "PvtModel"),
535
+ ("regnet", "RegNetModel"),
536
+ ("resnet", "ResNetModel"),
537
+ ("segformer", "SegformerModel"),
538
+ ("siglip_vision_model", "SiglipVisionModel"),
539
+ ("swiftformer", "SwiftFormerModel"),
540
+ ("swin", "SwinModel"),
541
+ ("swin2sr", "Swin2SRModel"),
542
+ ("swinv2", "Swinv2Model"),
543
+ ("table-transformer", "TableTransformerModel"),
544
+ ("timesformer", "TimesformerModel"),
545
+ ("timm_backbone", "TimmBackbone"),
546
+ ("van", "VanModel"),
547
+ ("videomae", "VideoMAEModel"),
548
+ ("vit", "ViTModel"),
549
+ ("vit_hybrid", "ViTHybridModel"),
550
+ ("vit_mae", "ViTMAEModel"),
551
+ ("vit_msn", "ViTMSNModel"),
552
+ ("vitdet", "VitDetModel"),
553
+ ("vivit", "VivitModel"),
554
+ ("yolos", "YolosModel"),
555
+ ]
556
+ )
557
+
558
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
559
+ [
560
+ ("deit", "DeiTForMaskedImageModeling"),
561
+ ("focalnet", "FocalNetForMaskedImageModeling"),
562
+ ("swin", "SwinForMaskedImageModeling"),
563
+ ("swinv2", "Swinv2ForMaskedImageModeling"),
564
+ ("vit", "ViTForMaskedImageModeling"),
565
+ ]
566
+ )
567
+
568
+
569
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
570
+ # Model for Causal Image Modeling mapping
571
+ [
572
+ ("imagegpt", "ImageGPTForCausalImageModeling"),
573
+ ]
574
+ )
575
+
576
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
577
+ [
578
+ # Model for Image Classification mapping
579
+ ("beit", "BeitForImageClassification"),
580
+ ("bit", "BitForImageClassification"),
581
+ ("clip", "CLIPForImageClassification"),
582
+ ("convnext", "ConvNextForImageClassification"),
583
+ ("convnextv2", "ConvNextV2ForImageClassification"),
584
+ ("cvt", "CvtForImageClassification"),
585
+ ("data2vec-vision", "Data2VecVisionForImageClassification"),
586
+ (
587
+ "deit",
588
+ ("DeiTForImageClassification", "DeiTForImageClassificationWithTeacher"),
589
+ ),
590
+ ("dinat", "DinatForImageClassification"),
591
+ ("dinov2", "Dinov2ForImageClassification"),
592
+ (
593
+ "efficientformer",
594
+ (
595
+ "EfficientFormerForImageClassification",
596
+ "EfficientFormerForImageClassificationWithTeacher",
597
+ ),
598
+ ),
599
+ ("efficientnet", "EfficientNetForImageClassification"),
600
+ ("focalnet", "FocalNetForImageClassification"),
601
+ ("imagegpt", "ImageGPTForImageClassification"),
602
+ (
603
+ "levit",
604
+ ("LevitForImageClassification", "LevitForImageClassificationWithTeacher"),
605
+ ),
606
+ ("mobilenet_v1", "MobileNetV1ForImageClassification"),
607
+ ("mobilenet_v2", "MobileNetV2ForImageClassification"),
608
+ ("mobilevit", "MobileViTForImageClassification"),
609
+ ("mobilevitv2", "MobileViTV2ForImageClassification"),
610
+ ("nat", "NatForImageClassification"),
611
+ (
612
+ "perceiver",
613
+ (
614
+ "PerceiverForImageClassificationLearned",
615
+ "PerceiverForImageClassificationFourier",
616
+ "PerceiverForImageClassificationConvProcessing",
617
+ ),
618
+ ),
619
+ ("poolformer", "PoolFormerForImageClassification"),
620
+ ("pvt", "PvtForImageClassification"),
621
+ ("pvt_v2", "PvtV2ForImageClassification"),
622
+ ("regnet", "RegNetForImageClassification"),
623
+ ("resnet", "ResNetForImageClassification"),
624
+ ("segformer", "SegformerForImageClassification"),
625
+ ("siglip", "SiglipForImageClassification"),
626
+ ("swiftformer", "SwiftFormerForImageClassification"),
627
+ ("swin", "SwinForImageClassification"),
628
+ ("swinv2", "Swinv2ForImageClassification"),
629
+ ("van", "VanForImageClassification"),
630
+ ("vit", "ViTForImageClassification"),
631
+ ("vit_hybrid", "ViTHybridForImageClassification"),
632
+ ("vit_msn", "ViTMSNForImageClassification"),
633
+ ]
634
+ )
635
+
636
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES = OrderedDict(
637
+ [
638
+ # Do not add new models here, this class will be deprecated in the future.
639
+ # Model for Image Segmentation mapping
640
+ ("detr", "DetrForSegmentation"),
641
+ ]
642
+ )
643
+
644
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict(
645
+ [
646
+ # Model for Semantic Segmentation mapping
647
+ ("beit", "BeitForSemanticSegmentation"),
648
+ ("data2vec-vision", "Data2VecVisionForSemanticSegmentation"),
649
+ ("dpt", "DPTForSemanticSegmentation"),
650
+ ("mobilenet_v2", "MobileNetV2ForSemanticSegmentation"),
651
+ ("mobilevit", "MobileViTForSemanticSegmentation"),
652
+ ("mobilevitv2", "MobileViTV2ForSemanticSegmentation"),
653
+ ("segformer", "SegformerForSemanticSegmentation"),
654
+ ("upernet", "UperNetForSemanticSegmentation"),
655
+ ]
656
+ )
657
+
658
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES = OrderedDict(
659
+ [
660
+ # Model for Instance Segmentation mapping
661
+ # MaskFormerForInstanceSegmentation can be removed from this mapping in v5
662
+ ("maskformer", "MaskFormerForInstanceSegmentation"),
663
+ ]
664
+ )
665
+
666
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES = OrderedDict(
667
+ [
668
+ # Model for Universal Segmentation mapping
669
+ ("detr", "DetrForSegmentation"),
670
+ ("mask2former", "Mask2FormerForUniversalSegmentation"),
671
+ ("maskformer", "MaskFormerForInstanceSegmentation"),
672
+ ("oneformer", "OneFormerForUniversalSegmentation"),
673
+ ]
674
+ )
675
+
676
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
677
+ [
678
+ ("timesformer", "TimesformerForVideoClassification"),
679
+ ("videomae", "VideoMAEForVideoClassification"),
680
+ ("vivit", "VivitForVideoClassification"),
681
+ ]
682
+ )
683
+
684
+ MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
685
+ [
686
+ ("blip", "BlipForConditionalGeneration"),
687
+ ("blip-2", "Blip2ForConditionalGeneration"),
688
+ ("git", "GitForCausalLM"),
689
+ ("idefics2", "Idefics2ForConditionalGeneration"),
690
+ ("instructblip", "InstructBlipForConditionalGeneration"),
691
+ ("kosmos-2", "Kosmos2ForConditionalGeneration"),
692
+ ("llava", "LlavaForConditionalGeneration"),
693
+ ("llava_next", "LlavaNextForConditionalGeneration"),
694
+ ("pix2struct", "Pix2StructForConditionalGeneration"),
695
+ ("vipllava", "VipLlavaForConditionalGeneration"),
696
+ ("vision-encoder-decoder", "VisionEncoderDecoderModel"),
697
+ ]
698
+ )
699
+
700
+ MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
701
+ [
702
+ # Model for Masked LM mapping
703
+ ("albert", "AlbertForMaskedLM"),
704
+ ("bart", "BartForConditionalGeneration"),
705
+ ("bert", "BertForMaskedLM"),
706
+ ("big_bird", "BigBirdForMaskedLM"),
707
+ ("camembert", "CamembertForMaskedLM"),
708
+ ("convbert", "ConvBertForMaskedLM"),
709
+ ("data2vec-text", "Data2VecTextForMaskedLM"),
710
+ ("deberta", "DebertaForMaskedLM"),
711
+ ("deberta-v2", "DebertaV2ForMaskedLM"),
712
+ ("distilbert", "DistilBertForMaskedLM"),
713
+ ("electra", "ElectraForMaskedLM"),
714
+ ("ernie", "ErnieForMaskedLM"),
715
+ ("esm", "EsmForMaskedLM"),
716
+ ("flaubert", "FlaubertWithLMHeadModel"),
717
+ ("fnet", "FNetForMaskedLM"),
718
+ ("funnel", "FunnelForMaskedLM"),
719
+ ("ibert", "IBertForMaskedLM"),
720
+ ("layoutlm", "LayoutLMForMaskedLM"),
721
+ ("longformer", "LongformerForMaskedLM"),
722
+ ("luke", "LukeForMaskedLM"),
723
+ ("mbart", "MBartForConditionalGeneration"),
724
+ ("mega", "MegaForMaskedLM"),
725
+ ("megatron-bert", "MegatronBertForMaskedLM"),
726
+ ("mobilebert", "MobileBertForMaskedLM"),
727
+ ("mpnet", "MPNetForMaskedLM"),
728
+ ("mra", "MraForMaskedLM"),
729
+ ("mvp", "MvpForConditionalGeneration"),
730
+ ("nezha", "NezhaForMaskedLM"),
731
+ ("nystromformer", "NystromformerForMaskedLM"),
732
+ ("perceiver", "PerceiverForMaskedLM"),
733
+ ("qdqbert", "QDQBertForMaskedLM"),
734
+ ("reformer", "ReformerForMaskedLM"),
735
+ ("rembert", "RemBertForMaskedLM"),
736
+ ("roberta", "RobertaForMaskedLM"),
737
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMaskedLM"),
738
+ ("roc_bert", "RoCBertForMaskedLM"),
739
+ ("roformer", "RoFormerForMaskedLM"),
740
+ ("squeezebert", "SqueezeBertForMaskedLM"),
741
+ ("tapas", "TapasForMaskedLM"),
742
+ ("wav2vec2", "Wav2Vec2ForMaskedLM"),
743
+ ("xlm", "XLMWithLMHeadModel"),
744
+ ("xlm-roberta", "XLMRobertaForMaskedLM"),
745
+ ("xlm-roberta-xl", "XLMRobertaXLForMaskedLM"),
746
+ ("xmod", "XmodForMaskedLM"),
747
+ ("yoso", "YosoForMaskedLM"),
748
+ ]
749
+ )
750
+
751
+ MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
752
+ [
753
+ # Model for Object Detection mapping
754
+ ("conditional_detr", "ConditionalDetrForObjectDetection"),
755
+ ("deformable_detr", "DeformableDetrForObjectDetection"),
756
+ ("deta", "DetaForObjectDetection"),
757
+ ("detr", "DetrForObjectDetection"),
758
+ ("table-transformer", "TableTransformerForObjectDetection"),
759
+ ("yolos", "YolosForObjectDetection"),
760
+ ]
761
+ )
762
+
763
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES = OrderedDict(
764
+ [
765
+ # Model for Zero Shot Object Detection mapping
766
+ ("grounding-dino", "GroundingDinoForObjectDetection"),
767
+ ("owlv2", "Owlv2ForObjectDetection"),
768
+ ("owlvit", "OwlViTForObjectDetection"),
769
+ ]
770
+ )
771
+
772
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES = OrderedDict(
773
+ [
774
+ # Model for depth estimation mapping
775
+ ("depth_anything", "DepthAnythingForDepthEstimation"),
776
+ ("dpt", "DPTForDepthEstimation"),
777
+ ("glpn", "GLPNForDepthEstimation"),
778
+ ]
779
+ )
780
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
781
+ [
782
+ # Model for Seq2Seq Causal LM mapping
783
+ ("bart", "BartForConditionalGeneration"),
784
+ ("bigbird_pegasus", "BigBirdPegasusForConditionalGeneration"),
785
+ ("blenderbot", "BlenderbotForConditionalGeneration"),
786
+ ("blenderbot-small", "BlenderbotSmallForConditionalGeneration"),
787
+ ("encoder-decoder", "EncoderDecoderModel"),
788
+ ("fsmt", "FSMTForConditionalGeneration"),
789
+ ("gptsan-japanese", "GPTSanJapaneseForConditionalGeneration"),
790
+ ("led", "LEDForConditionalGeneration"),
791
+ ("longt5", "LongT5ForConditionalGeneration"),
792
+ ("m2m_100", "M2M100ForConditionalGeneration"),
793
+ ("marian", "MarianMTModel"),
794
+ ("mbart", "MBartForConditionalGeneration"),
795
+ ("mt5", "MT5ForConditionalGeneration"),
796
+ ("mvp", "MvpForConditionalGeneration"),
797
+ ("nllb-moe", "NllbMoeForConditionalGeneration"),
798
+ ("pegasus", "PegasusForConditionalGeneration"),
799
+ ("pegasus_x", "PegasusXForConditionalGeneration"),
800
+ ("plbart", "PLBartForConditionalGeneration"),
801
+ ("prophetnet", "ProphetNetForConditionalGeneration"),
802
+ ("seamless_m4t", "SeamlessM4TForTextToText"),
803
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToText"),
804
+ ("switch_transformers", "SwitchTransformersForConditionalGeneration"),
805
+ ("t5", "T5ForConditionalGeneration"),
806
+ ("umt5", "UMT5ForConditionalGeneration"),
807
+ ("xlm-prophetnet", "XLMProphetNetForConditionalGeneration"),
808
+ ]
809
+ )
810
+
811
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
812
+ [
813
+ ("pop2piano", "Pop2PianoForConditionalGeneration"),
814
+ ("seamless_m4t", "SeamlessM4TForSpeechToText"),
815
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForSpeechToText"),
816
+ ("speech-encoder-decoder", "SpeechEncoderDecoderModel"),
817
+ ("speech_to_text", "Speech2TextForConditionalGeneration"),
818
+ ("speecht5", "SpeechT5ForSpeechToText"),
819
+ ("whisper", "WhisperForConditionalGeneration"),
820
+ ]
821
+ )
822
+
823
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
824
+ [
825
+ # Model for Sequence Classification mapping
826
+ ("albert", "AlbertForSequenceClassification"),
827
+ ("bart", "BartForSequenceClassification"),
828
+ ("bert", "BertForSequenceClassification"),
829
+ ("big_bird", "BigBirdForSequenceClassification"),
830
+ ("bigbird_pegasus", "BigBirdPegasusForSequenceClassification"),
831
+ ("biogpt", "BioGptForSequenceClassification"),
832
+ ("bloom", "BloomForSequenceClassification"),
833
+ ("camembert", "CamembertForSequenceClassification"),
834
+ ("canine", "CanineForSequenceClassification"),
835
+ ("code_llama", "LlamaForSequenceClassification"),
836
+ ("convbert", "ConvBertForSequenceClassification"),
837
+ ("ctrl", "CTRLForSequenceClassification"),
838
+ ("data2vec-text", "Data2VecTextForSequenceClassification"),
839
+ ("deberta", "DebertaForSequenceClassification"),
840
+ ("deberta-v2", "DebertaV2ForSequenceClassification"),
841
+ ("distilbert", "DistilBertForSequenceClassification"),
842
+ ("electra", "ElectraForSequenceClassification"),
843
+ ("ernie", "ErnieForSequenceClassification"),
844
+ ("ernie_m", "ErnieMForSequenceClassification"),
845
+ ("esm", "EsmForSequenceClassification"),
846
+ ("falcon", "FalconForSequenceClassification"),
847
+ ("flaubert", "FlaubertForSequenceClassification"),
848
+ ("fnet", "FNetForSequenceClassification"),
849
+ ("funnel", "FunnelForSequenceClassification"),
850
+ ("gemma", "GemmaForSequenceClassification"),
851
+ ("gpt-sw3", "GPT2ForSequenceClassification"),
852
+ ("gpt2", "GPT2ForSequenceClassification"),
853
+ ("gpt_bigcode", "GPTBigCodeForSequenceClassification"),
854
+ ("gpt_neo", "GPTNeoForSequenceClassification"),
855
+ ("gpt_neox", "GPTNeoXForSequenceClassification"),
856
+ ("gptj", "GPTJForSequenceClassification"),
857
+ ("ibert", "IBertForSequenceClassification"),
858
+ ("jamba", "JambaForSequenceClassification"),
859
+ ("layoutlm", "LayoutLMForSequenceClassification"),
860
+ ("layoutlmv2", "LayoutLMv2ForSequenceClassification"),
861
+ ("layoutlmv3", "LayoutLMv3ForSequenceClassification"),
862
+ ("led", "LEDForSequenceClassification"),
863
+ ("lilt", "LiltForSequenceClassification"),
864
+ ("llama", "LlamaForSequenceClassification"),
865
+ ("longformer", "LongformerForSequenceClassification"),
866
+ ("luke", "LukeForSequenceClassification"),
867
+ ("markuplm", "MarkupLMForSequenceClassification"),
868
+ ("mbart", "MBartForSequenceClassification"),
869
+ ("mega", "MegaForSequenceClassification"),
870
+ ("megatron-bert", "MegatronBertForSequenceClassification"),
871
+ ("mistral", "MistralForSequenceClassification"),
872
+ ("mixtral", "MixtralForSequenceClassification"),
873
+ ("mobilebert", "MobileBertForSequenceClassification"),
874
+ ("mpnet", "MPNetForSequenceClassification"),
875
+ ("mpt", "MptForSequenceClassification"),
876
+ ("mra", "MraForSequenceClassification"),
877
+ ("mt5", "MT5ForSequenceClassification"),
878
+ ("mvp", "MvpForSequenceClassification"),
879
+ ("nezha", "NezhaForSequenceClassification"),
880
+ ("nystromformer", "NystromformerForSequenceClassification"),
881
+ ("open-llama", "OpenLlamaForSequenceClassification"),
882
+ ("openai-gpt", "OpenAIGPTForSequenceClassification"),
883
+ ("opt", "OPTForSequenceClassification"),
884
+ ("perceiver", "PerceiverForSequenceClassification"),
885
+ ("persimmon", "PersimmonForSequenceClassification"),
886
+ ("phi", "PhiForSequenceClassification"),
887
+ ("plbart", "PLBartForSequenceClassification"),
888
+ ("qdqbert", "QDQBertForSequenceClassification"),
889
+ ("qwen2", "Qwen2ForSequenceClassification"),
890
+ ("qwen2_moe", "Qwen2MoeForSequenceClassification"),
891
+ ("reformer", "ReformerForSequenceClassification"),
892
+ ("rembert", "RemBertForSequenceClassification"),
893
+ ("roberta", "RobertaForSequenceClassification"),
894
+ ("roberta-prelayernorm", "RobertaPreLayerNormForSequenceClassification"),
895
+ ("roc_bert", "RoCBertForSequenceClassification"),
896
+ ("roformer", "RoFormerForSequenceClassification"),
897
+ ("squeezebert", "SqueezeBertForSequenceClassification"),
898
+ ("stablelm", "StableLmForSequenceClassification"),
899
+ ("starcoder2", "Starcoder2ForSequenceClassification"),
900
+ ("t5", "T5ForSequenceClassification"),
901
+ ("tapas", "TapasForSequenceClassification"),
902
+ ("transfo-xl", "TransfoXLForSequenceClassification"),
903
+ ("umt5", "UMT5ForSequenceClassification"),
904
+ ("xlm", "XLMForSequenceClassification"),
905
+ ("xlm-roberta", "XLMRobertaForSequenceClassification"),
906
+ ("xlm-roberta-xl", "XLMRobertaXLForSequenceClassification"),
907
+ ("xlnet", "XLNetForSequenceClassification"),
908
+ ("xmod", "XmodForSequenceClassification"),
909
+ ("yoso", "YosoForSequenceClassification"),
910
+ ]
911
+ )
912
+
913
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
914
+ [
915
+ # Model for Question Answering mapping
916
+ ("albert", "AlbertForQuestionAnswering"),
917
+ ("bart", "BartForQuestionAnswering"),
918
+ ("bert", "BertForQuestionAnswering"),
919
+ ("big_bird", "BigBirdForQuestionAnswering"),
920
+ ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"),
921
+ ("bloom", "BloomForQuestionAnswering"),
922
+ ("camembert", "CamembertForQuestionAnswering"),
923
+ ("canine", "CanineForQuestionAnswering"),
924
+ ("convbert", "ConvBertForQuestionAnswering"),
925
+ ("data2vec-text", "Data2VecTextForQuestionAnswering"),
926
+ ("deberta", "DebertaForQuestionAnswering"),
927
+ ("deberta-v2", "DebertaV2ForQuestionAnswering"),
928
+ ("distilbert", "DistilBertForQuestionAnswering"),
929
+ ("electra", "ElectraForQuestionAnswering"),
930
+ ("ernie", "ErnieForQuestionAnswering"),
931
+ ("ernie_m", "ErnieMForQuestionAnswering"),
932
+ ("falcon", "FalconForQuestionAnswering"),
933
+ ("flaubert", "FlaubertForQuestionAnsweringSimple"),
934
+ ("fnet", "FNetForQuestionAnswering"),
935
+ ("funnel", "FunnelForQuestionAnswering"),
936
+ ("gpt2", "GPT2ForQuestionAnswering"),
937
+ ("gpt_neo", "GPTNeoForQuestionAnswering"),
938
+ ("gpt_neox", "GPTNeoXForQuestionAnswering"),
939
+ ("gptj", "GPTJForQuestionAnswering"),
940
+ ("ibert", "IBertForQuestionAnswering"),
941
+ ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
942
+ ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
943
+ ("led", "LEDForQuestionAnswering"),
944
+ ("lilt", "LiltForQuestionAnswering"),
945
+ ("llama", "LlamaForQuestionAnswering"),
946
+ ("longformer", "LongformerForQuestionAnswering"),
947
+ ("luke", "LukeForQuestionAnswering"),
948
+ ("lxmert", "LxmertForQuestionAnswering"),
949
+ ("markuplm", "MarkupLMForQuestionAnswering"),
950
+ ("mbart", "MBartForQuestionAnswering"),
951
+ ("mega", "MegaForQuestionAnswering"),
952
+ ("megatron-bert", "MegatronBertForQuestionAnswering"),
953
+ ("mobilebert", "MobileBertForQuestionAnswering"),
954
+ ("mpnet", "MPNetForQuestionAnswering"),
955
+ ("mpt", "MptForQuestionAnswering"),
956
+ ("mra", "MraForQuestionAnswering"),
957
+ ("mt5", "MT5ForQuestionAnswering"),
958
+ ("mvp", "MvpForQuestionAnswering"),
959
+ ("nezha", "NezhaForQuestionAnswering"),
960
+ ("nystromformer", "NystromformerForQuestionAnswering"),
961
+ ("opt", "OPTForQuestionAnswering"),
962
+ ("qdqbert", "QDQBertForQuestionAnswering"),
963
+ ("reformer", "ReformerForQuestionAnswering"),
964
+ ("rembert", "RemBertForQuestionAnswering"),
965
+ ("roberta", "RobertaForQuestionAnswering"),
966
+ ("roberta-prelayernorm", "RobertaPreLayerNormForQuestionAnswering"),
967
+ ("roc_bert", "RoCBertForQuestionAnswering"),
968
+ ("roformer", "RoFormerForQuestionAnswering"),
969
+ ("splinter", "SplinterForQuestionAnswering"),
970
+ ("squeezebert", "SqueezeBertForQuestionAnswering"),
971
+ ("t5", "T5ForQuestionAnswering"),
972
+ ("umt5", "UMT5ForQuestionAnswering"),
973
+ ("xlm", "XLMForQuestionAnsweringSimple"),
974
+ ("xlm-roberta", "XLMRobertaForQuestionAnswering"),
975
+ ("xlm-roberta-xl", "XLMRobertaXLForQuestionAnswering"),
976
+ ("xlnet", "XLNetForQuestionAnsweringSimple"),
977
+ ("xmod", "XmodForQuestionAnswering"),
978
+ ("yoso", "YosoForQuestionAnswering"),
979
+ ]
980
+ )
981
+
982
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
983
+ [
984
+ # Model for Table Question Answering mapping
985
+ ("tapas", "TapasForQuestionAnswering"),
986
+ ]
987
+ )
988
+
989
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
990
+ [
991
+ ("blip", "BlipForQuestionAnswering"),
992
+ ("blip-2", "Blip2ForConditionalGeneration"),
993
+ ("vilt", "ViltForQuestionAnswering"),
994
+ ]
995
+ )
996
+
997
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
998
+ [
999
+ ("layoutlm", "LayoutLMForQuestionAnswering"),
1000
+ ("layoutlmv2", "LayoutLMv2ForQuestionAnswering"),
1001
+ ("layoutlmv3", "LayoutLMv3ForQuestionAnswering"),
1002
+ ]
1003
+ )
1004
+
1005
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1006
+ [
1007
+ # Model for Token Classification mapping
1008
+ ("albert", "AlbertForTokenClassification"),
1009
+ ("bert", "BertForTokenClassification"),
1010
+ ("big_bird", "BigBirdForTokenClassification"),
1011
+ ("biogpt", "BioGptForTokenClassification"),
1012
+ ("bloom", "BloomForTokenClassification"),
1013
+ ("bros", "BrosForTokenClassification"),
1014
+ ("camembert", "CamembertForTokenClassification"),
1015
+ ("canine", "CanineForTokenClassification"),
1016
+ ("convbert", "ConvBertForTokenClassification"),
1017
+ ("data2vec-text", "Data2VecTextForTokenClassification"),
1018
+ ("deberta", "DebertaForTokenClassification"),
1019
+ ("deberta-v2", "DebertaV2ForTokenClassification"),
1020
+ ("distilbert", "DistilBertForTokenClassification"),
1021
+ ("electra", "ElectraForTokenClassification"),
1022
+ ("ernie", "ErnieForTokenClassification"),
1023
+ ("ernie_m", "ErnieMForTokenClassification"),
1024
+ ("esm", "EsmForTokenClassification"),
1025
+ ("falcon", "FalconForTokenClassification"),
1026
+ ("flaubert", "FlaubertForTokenClassification"),
1027
+ ("fnet", "FNetForTokenClassification"),
1028
+ ("funnel", "FunnelForTokenClassification"),
1029
+ ("gpt-sw3", "GPT2ForTokenClassification"),
1030
+ ("gpt2", "GPT2ForTokenClassification"),
1031
+ ("gpt_bigcode", "GPTBigCodeForTokenClassification"),
1032
+ ("gpt_neo", "GPTNeoForTokenClassification"),
1033
+ ("gpt_neox", "GPTNeoXForTokenClassification"),
1034
+ ("ibert", "IBertForTokenClassification"),
1035
+ ("layoutlm", "LayoutLMForTokenClassification"),
1036
+ ("layoutlmv2", "LayoutLMv2ForTokenClassification"),
1037
+ ("layoutlmv3", "LayoutLMv3ForTokenClassification"),
1038
+ ("lilt", "LiltForTokenClassification"),
1039
+ ("longformer", "LongformerForTokenClassification"),
1040
+ ("luke", "LukeForTokenClassification"),
1041
+ ("markuplm", "MarkupLMForTokenClassification"),
1042
+ ("mega", "MegaForTokenClassification"),
1043
+ ("megatron-bert", "MegatronBertForTokenClassification"),
1044
+ ("mobilebert", "MobileBertForTokenClassification"),
1045
+ ("mpnet", "MPNetForTokenClassification"),
1046
+ ("mpt", "MptForTokenClassification"),
1047
+ ("mra", "MraForTokenClassification"),
1048
+ ("mt5", "MT5ForTokenClassification"),
1049
+ ("nezha", "NezhaForTokenClassification"),
1050
+ ("nystromformer", "NystromformerForTokenClassification"),
1051
+ ("phi", "PhiForTokenClassification"),
1052
+ ("qdqbert", "QDQBertForTokenClassification"),
1053
+ ("rembert", "RemBertForTokenClassification"),
1054
+ ("roberta", "RobertaForTokenClassification"),
1055
+ ("roberta-prelayernorm", "RobertaPreLayerNormForTokenClassification"),
1056
+ ("roc_bert", "RoCBertForTokenClassification"),
1057
+ ("roformer", "RoFormerForTokenClassification"),
1058
+ ("squeezebert", "SqueezeBertForTokenClassification"),
1059
+ ("t5", "T5ForTokenClassification"),
1060
+ ("umt5", "UMT5ForTokenClassification"),
1061
+ ("xlm", "XLMForTokenClassification"),
1062
+ ("xlm-roberta", "XLMRobertaForTokenClassification"),
1063
+ ("xlm-roberta-xl", "XLMRobertaXLForTokenClassification"),
1064
+ ("xlnet", "XLNetForTokenClassification"),
1065
+ ("xmod", "XmodForTokenClassification"),
1066
+ ("yoso", "YosoForTokenClassification"),
1067
+ ]
1068
+ )
1069
+
1070
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
1071
+ [
1072
+ # Model for Multiple Choice mapping
1073
+ ("albert", "AlbertForMultipleChoice"),
1074
+ ("bert", "BertForMultipleChoice"),
1075
+ ("big_bird", "BigBirdForMultipleChoice"),
1076
+ ("camembert", "CamembertForMultipleChoice"),
1077
+ ("canine", "CanineForMultipleChoice"),
1078
+ ("convbert", "ConvBertForMultipleChoice"),
1079
+ ("data2vec-text", "Data2VecTextForMultipleChoice"),
1080
+ ("deberta-v2", "DebertaV2ForMultipleChoice"),
1081
+ ("distilbert", "DistilBertForMultipleChoice"),
1082
+ ("electra", "ElectraForMultipleChoice"),
1083
+ ("ernie", "ErnieForMultipleChoice"),
1084
+ ("ernie_m", "ErnieMForMultipleChoice"),
1085
+ ("flaubert", "FlaubertForMultipleChoice"),
1086
+ ("fnet", "FNetForMultipleChoice"),
1087
+ ("funnel", "FunnelForMultipleChoice"),
1088
+ ("ibert", "IBertForMultipleChoice"),
1089
+ ("longformer", "LongformerForMultipleChoice"),
1090
+ ("luke", "LukeForMultipleChoice"),
1091
+ ("mega", "MegaForMultipleChoice"),
1092
+ ("megatron-bert", "MegatronBertForMultipleChoice"),
1093
+ ("mobilebert", "MobileBertForMultipleChoice"),
1094
+ ("mpnet", "MPNetForMultipleChoice"),
1095
+ ("mra", "MraForMultipleChoice"),
1096
+ ("nezha", "NezhaForMultipleChoice"),
1097
+ ("nystromformer", "NystromformerForMultipleChoice"),
1098
+ ("qdqbert", "QDQBertForMultipleChoice"),
1099
+ ("rembert", "RemBertForMultipleChoice"),
1100
+ ("roberta", "RobertaForMultipleChoice"),
1101
+ ("roberta-prelayernorm", "RobertaPreLayerNormForMultipleChoice"),
1102
+ ("roc_bert", "RoCBertForMultipleChoice"),
1103
+ ("roformer", "RoFormerForMultipleChoice"),
1104
+ ("squeezebert", "SqueezeBertForMultipleChoice"),
1105
+ ("xlm", "XLMForMultipleChoice"),
1106
+ ("xlm-roberta", "XLMRobertaForMultipleChoice"),
1107
+ ("xlm-roberta-xl", "XLMRobertaXLForMultipleChoice"),
1108
+ ("xlnet", "XLNetForMultipleChoice"),
1109
+ ("xmod", "XmodForMultipleChoice"),
1110
+ ("yoso", "YosoForMultipleChoice"),
1111
+ ]
1112
+ )
1113
+
1114
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
1115
+ [
1116
+ ("bert", "BertForNextSentencePrediction"),
1117
+ ("ernie", "ErnieForNextSentencePrediction"),
1118
+ ("fnet", "FNetForNextSentencePrediction"),
1119
+ ("megatron-bert", "MegatronBertForNextSentencePrediction"),
1120
+ ("mobilebert", "MobileBertForNextSentencePrediction"),
1121
+ ("nezha", "NezhaForNextSentencePrediction"),
1122
+ ("qdqbert", "QDQBertForNextSentencePrediction"),
1123
+ ]
1124
+ )
1125
+
1126
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1127
+ [
1128
+ # Model for Audio Classification mapping
1129
+ ("audio-spectrogram-transformer", "ASTForAudioClassification"),
1130
+ ("data2vec-audio", "Data2VecAudioForSequenceClassification"),
1131
+ ("hubert", "HubertForSequenceClassification"),
1132
+ ("sew", "SEWForSequenceClassification"),
1133
+ ("sew-d", "SEWDForSequenceClassification"),
1134
+ ("unispeech", "UniSpeechForSequenceClassification"),
1135
+ ("unispeech-sat", "UniSpeechSatForSequenceClassification"),
1136
+ ("wav2vec2", "Wav2Vec2ForSequenceClassification"),
1137
+ ("wav2vec2-bert", "Wav2Vec2BertForSequenceClassification"),
1138
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForSequenceClassification"),
1139
+ ("wavlm", "WavLMForSequenceClassification"),
1140
+ ("whisper", "WhisperForAudioClassification"),
1141
+ ]
1142
+ )
1143
+
1144
+ MODEL_FOR_CTC_MAPPING_NAMES = OrderedDict(
1145
+ [
1146
+ # Model for Connectionist temporal classification (CTC) mapping
1147
+ ("data2vec-audio", "Data2VecAudioForCTC"),
1148
+ ("hubert", "HubertForCTC"),
1149
+ ("mctct", "MCTCTForCTC"),
1150
+ ("sew", "SEWForCTC"),
1151
+ ("sew-d", "SEWDForCTC"),
1152
+ ("unispeech", "UniSpeechForCTC"),
1153
+ ("unispeech-sat", "UniSpeechSatForCTC"),
1154
+ ("wav2vec2", "Wav2Vec2ForCTC"),
1155
+ ("wav2vec2-bert", "Wav2Vec2BertForCTC"),
1156
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForCTC"),
1157
+ ("wavlm", "WavLMForCTC"),
1158
+ ]
1159
+ )
1160
+
1161
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1162
+ [
1163
+ # Model for Audio Frame Classification mapping
1164
+ ("data2vec-audio", "Data2VecAudioForAudioFrameClassification"),
1165
+ ("unispeech-sat", "UniSpeechSatForAudioFrameClassification"),
1166
+ ("wav2vec2", "Wav2Vec2ForAudioFrameClassification"),
1167
+ ("wav2vec2-bert", "Wav2Vec2BertForAudioFrameClassification"),
1168
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForAudioFrameClassification"),
1169
+ ("wavlm", "WavLMForAudioFrameClassification"),
1170
+ ]
1171
+ )
1172
+
1173
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES = OrderedDict(
1174
+ [
1175
+ # Model for Audio XVector mapping
1176
+ ("data2vec-audio", "Data2VecAudioForXVector"),
1177
+ ("unispeech-sat", "UniSpeechSatForXVector"),
1178
+ ("wav2vec2", "Wav2Vec2ForXVector"),
1179
+ ("wav2vec2-bert", "Wav2Vec2BertForXVector"),
1180
+ ("wav2vec2-conformer", "Wav2Vec2ConformerForXVector"),
1181
+ ("wavlm", "WavLMForXVector"),
1182
+ ]
1183
+ )
1184
+
1185
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES = OrderedDict(
1186
+ [
1187
+ # Model for Text-To-Spectrogram mapping
1188
+ ("fastspeech2_conformer", "FastSpeech2ConformerModel"),
1189
+ ("speecht5", "SpeechT5ForTextToSpeech"),
1190
+ ]
1191
+ )
1192
+
1193
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES = OrderedDict(
1194
+ [
1195
+ # Model for Text-To-Waveform mapping
1196
+ ("bark", "BarkModel"),
1197
+ ("fastspeech2_conformer", "FastSpeech2ConformerWithHifiGan"),
1198
+ ("musicgen", "MusicgenForConditionalGeneration"),
1199
+ ("musicgen_melody", "MusicgenMelodyForConditionalGeneration"),
1200
+ ("seamless_m4t", "SeamlessM4TForTextToSpeech"),
1201
+ ("seamless_m4t_v2", "SeamlessM4Tv2ForTextToSpeech"),
1202
+ ("vits", "VitsModel"),
1203
+ ]
1204
+ )
1205
+
1206
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1207
+ [
1208
+ # Model for Zero Shot Image Classification mapping
1209
+ ("align", "AlignModel"),
1210
+ ("altclip", "AltCLIPModel"),
1211
+ ("blip", "BlipModel"),
1212
+ ("chinese_clip", "ChineseCLIPModel"),
1213
+ ("clip", "CLIPModel"),
1214
+ ("clipseg", "CLIPSegModel"),
1215
+ ("siglip", "SiglipModel"),
1216
+ ]
1217
+ )
1218
+
1219
+ MODEL_FOR_BACKBONE_MAPPING_NAMES = OrderedDict(
1220
+ [
1221
+ # Backbone mapping
1222
+ ("beit", "BeitBackbone"),
1223
+ ("bit", "BitBackbone"),
1224
+ ("convnext", "ConvNextBackbone"),
1225
+ ("convnextv2", "ConvNextV2Backbone"),
1226
+ ("dinat", "DinatBackbone"),
1227
+ ("dinov2", "Dinov2Backbone"),
1228
+ ("focalnet", "FocalNetBackbone"),
1229
+ ("maskformer-swin", "MaskFormerSwinBackbone"),
1230
+ ("nat", "NatBackbone"),
1231
+ ("pvt_v2", "PvtV2Backbone"),
1232
+ ("resnet", "ResNetBackbone"),
1233
+ ("swin", "SwinBackbone"),
1234
+ ("swinv2", "Swinv2Backbone"),
1235
+ ("timm_backbone", "TimmBackbone"),
1236
+ ("vitdet", "VitDetBackbone"),
1237
+ ]
1238
+ )
1239
+
1240
+ MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict(
1241
+ [
1242
+ ("sam", "SamModel"),
1243
+ ]
1244
+ )
1245
+
1246
+
1247
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES = OrderedDict(
1248
+ [
1249
+ ("superpoint", "SuperPointForKeypointDetection"),
1250
+ ]
1251
+ )
1252
+
1253
+
1254
+ MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict(
1255
+ [
1256
+ ("albert", "AlbertModel"),
1257
+ ("bert", "BertModel"),
1258
+ ("big_bird", "BigBirdModel"),
1259
+ ("data2vec-text", "Data2VecTextModel"),
1260
+ ("deberta", "DebertaModel"),
1261
+ ("deberta-v2", "DebertaV2Model"),
1262
+ ("distilbert", "DistilBertModel"),
1263
+ ("electra", "ElectraModel"),
1264
+ ("flaubert", "FlaubertModel"),
1265
+ ("ibert", "IBertModel"),
1266
+ ("longformer", "LongformerModel"),
1267
+ ("mobilebert", "MobileBertModel"),
1268
+ ("mt5", "MT5EncoderModel"),
1269
+ ("nystromformer", "NystromformerModel"),
1270
+ ("reformer", "ReformerModel"),
1271
+ ("rembert", "RemBertModel"),
1272
+ ("roberta", "RobertaModel"),
1273
+ ("roberta-prelayernorm", "RobertaPreLayerNormModel"),
1274
+ ("roc_bert", "RoCBertModel"),
1275
+ ("roformer", "RoFormerModel"),
1276
+ ("squeezebert", "SqueezeBertModel"),
1277
+ ("t5", "T5EncoderModel"),
1278
+ ("umt5", "UMT5EncoderModel"),
1279
+ ("xlm", "XLMModel"),
1280
+ ("xlm-roberta", "XLMRobertaModel"),
1281
+ ("xlm-roberta-xl", "XLMRobertaXLModel"),
1282
+ ]
1283
+ )
1284
+
1285
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
1286
+ [
1287
+ ("patchtsmixer", "PatchTSMixerForTimeSeriesClassification"),
1288
+ ("patchtst", "PatchTSTForClassification"),
1289
+ ]
1290
+ )
1291
+
1292
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES = OrderedDict(
1293
+ [
1294
+ ("patchtsmixer", "PatchTSMixerForRegression"),
1295
+ ("patchtst", "PatchTSTForRegression"),
1296
+ ]
1297
+ )
1298
+
1299
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES = OrderedDict(
1300
+ [
1301
+ ("swin2sr", "Swin2SRForImageSuperResolution"),
1302
+ ]
1303
+ )
1304
+
1305
+ MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_MAPPING_NAMES)
1306
+ MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_PRETRAINING_MAPPING_NAMES)
1307
+ MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_WITH_LM_HEAD_MAPPING_NAMES)
1308
+ MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
1309
+ MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
1310
+ CONFIG_MAPPING_NAMES, MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING_NAMES
1311
+ )
1312
+ MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1313
+ CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
1314
+ )
1315
+ MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1316
+ CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
1317
+ )
1318
+ MODEL_FOR_IMAGE_SEGMENTATION_MAPPING = _LazyAutoMapping(
1319
+ CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES
1320
+ )
1321
+ MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(
1322
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES
1323
+ )
1324
+ MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING = _LazyAutoMapping(
1325
+ CONFIG_MAPPING_NAMES, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING_NAMES
1326
+ )
1327
+ MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING = _LazyAutoMapping(
1328
+ CONFIG_MAPPING_NAMES, MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING_NAMES
1329
+ )
1330
+ MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1331
+ CONFIG_MAPPING_NAMES, MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES
1332
+ )
1333
+ MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
1334
+ MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1335
+ CONFIG_MAPPING_NAMES, MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
1336
+ )
1337
+ MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1338
+ CONFIG_MAPPING_NAMES, MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
1339
+ )
1340
+ MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES)
1341
+ MODEL_FOR_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_MAPPING_NAMES)
1342
+ MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
1343
+ CONFIG_MAPPING_NAMES, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
1344
+ )
1345
+ MODEL_FOR_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES)
1346
+ MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING = _LazyAutoMapping(
1347
+ CONFIG_MAPPING_NAMES, MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES
1348
+ )
1349
+ MODEL_FOR_DEPTH_ESTIMATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES)
1350
+ MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
1351
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
1352
+ )
1353
+ MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1354
+ CONFIG_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
1355
+ )
1356
+ MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1357
+ CONFIG_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
1358
+ )
1359
+ MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
1360
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
1361
+ )
1362
+ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1363
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
1364
+ )
1365
+ MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES)
1366
+ MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
1367
+ CONFIG_MAPPING_NAMES, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
1368
+ )
1369
+ MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1370
+ CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
1371
+ )
1372
+ MODEL_FOR_CTC_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES)
1373
+ MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES)
1374
+ MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1375
+ CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES
1376
+ )
1377
+ MODEL_FOR_AUDIO_XVECTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES)
1378
+
1379
+ MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING = _LazyAutoMapping(
1380
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING_NAMES
1381
+ )
1382
+
1383
+ MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING_NAMES)
1384
+
1385
+ MODEL_FOR_BACKBONE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_BACKBONE_MAPPING_NAMES)
1386
+
1387
+ MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_MASK_GENERATION_MAPPING_NAMES)
1388
+
1389
+ MODEL_FOR_KEYPOINT_DETECTION_MAPPING = _LazyAutoMapping(
1390
+ CONFIG_MAPPING_NAMES, MODEL_FOR_KEYPOINT_DETECTION_MAPPING_NAMES
1391
+ )
1392
+
1393
+ MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES)
1394
+
1395
+ MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING = _LazyAutoMapping(
1396
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_CLASSIFICATION_MAPPING_NAMES
1397
+ )
1398
+
1399
+ MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING = _LazyAutoMapping(
1400
+ CONFIG_MAPPING_NAMES, MODEL_FOR_TIME_SERIES_REGRESSION_MAPPING_NAMES
1401
+ )
1402
+
1403
+ MODEL_FOR_IMAGE_TO_IMAGE_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, MODEL_FOR_IMAGE_TO_IMAGE_MAPPING_NAMES)
1404
+
1405
+
1406
+ class AutoModelForMaskGeneration(_BaseAutoModelClass):
1407
+ _model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING
1408
+
1409
+
1410
+ class AutoModelForKeypointDetection(_BaseAutoModelClass):
1411
+ _model_mapping = MODEL_FOR_KEYPOINT_DETECTION_MAPPING
1412
+
1413
+
1414
+ class AutoModelForTextEncoding(_BaseAutoModelClass):
1415
+ _model_mapping = MODEL_FOR_TEXT_ENCODING_MAPPING
1416
+
1417
+
1418
+ class AutoModelForImageToImage(_BaseAutoModelClass):
1419
+ _model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING
1420
+
1421
+
1422
+ class AutoModel(_BaseAutoModelClass):
1423
+ _model_mapping = MODEL_MAPPING
1424
+
1425
+
1426
+ AutoModel = auto_class_update(AutoModel)
1427
+
1428
+
1429
+ class AutoModelForPreTraining(_BaseAutoModelClass):
1430
+ _model_mapping = MODEL_FOR_PRETRAINING_MAPPING
1431
+
1432
+
1433
+ AutoModelForPreTraining = auto_class_update(AutoModelForPreTraining, head_doc="pretraining")
1434
+
1435
+
1436
+ # Private on purpose, the public class will add the deprecation warnings.
1437
+ class _AutoModelWithLMHead(_BaseAutoModelClass):
1438
+ _model_mapping = MODEL_WITH_LM_HEAD_MAPPING
1439
+
1440
+
1441
+ _AutoModelWithLMHead = auto_class_update(_AutoModelWithLMHead, head_doc="language modeling")
1442
+
1443
+
1444
+ class AutoModelForCausalLM(_BaseAutoModelClass):
1445
+ _model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
1446
+
1447
+
1448
+ AutoModelForCausalLM = auto_class_update(AutoModelForCausalLM, head_doc="causal language modeling")
1449
+
1450
+
1451
+ class AutoModelForMaskedLM(_BaseAutoModelClass):
1452
+ _model_mapping = MODEL_FOR_MASKED_LM_MAPPING
1453
+
1454
+
1455
+ AutoModelForMaskedLM = auto_class_update(AutoModelForMaskedLM, head_doc="masked language modeling")
1456
+
1457
+
1458
+ class AutoModelForSeq2SeqLM(_BaseAutoModelClass):
1459
+ _model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
1460
+
1461
+
1462
+ AutoModelForSeq2SeqLM = auto_class_update(
1463
+ AutoModelForSeq2SeqLM,
1464
+ head_doc="sequence-to-sequence language modeling",
1465
+ checkpoint_for_example="google-t5/t5-base",
1466
+ )
1467
+
1468
+
1469
+ class AutoModelForSequenceClassification(_BaseAutoModelClass):
1470
+ _model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
1471
+
1472
+
1473
+ AutoModelForSequenceClassification = auto_class_update(
1474
+ AutoModelForSequenceClassification, head_doc="sequence classification"
1475
+ )
1476
+
1477
+
1478
+ class AutoModelForQuestionAnswering(_BaseAutoModelClass):
1479
+ _model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING
1480
+
1481
+
1482
+ AutoModelForQuestionAnswering = auto_class_update(AutoModelForQuestionAnswering, head_doc="question answering")
1483
+
1484
+
1485
+ class AutoModelForTableQuestionAnswering(_BaseAutoModelClass):
1486
+ _model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
1487
+
1488
+
1489
+ AutoModelForTableQuestionAnswering = auto_class_update(
1490
+ AutoModelForTableQuestionAnswering,
1491
+ head_doc="table question answering",
1492
+ checkpoint_for_example="google/tapas-base-finetuned-wtq",
1493
+ )
1494
+
1495
+
1496
+ class AutoModelForVisualQuestionAnswering(_BaseAutoModelClass):
1497
+ _model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
1498
+
1499
+
1500
+ AutoModelForVisualQuestionAnswering = auto_class_update(
1501
+ AutoModelForVisualQuestionAnswering,
1502
+ head_doc="visual question answering",
1503
+ checkpoint_for_example="dandelin/vilt-b32-finetuned-vqa",
1504
+ )
1505
+
1506
+
1507
+ class AutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
1508
+ _model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
1509
+
1510
+
1511
+ AutoModelForDocumentQuestionAnswering = auto_class_update(
1512
+ AutoModelForDocumentQuestionAnswering,
1513
+ head_doc="document question answering",
1514
+ checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3',
1515
+ )
1516
+
1517
+
1518
+ class AutoModelForTokenClassification(_BaseAutoModelClass):
1519
+ _model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
1520
+
1521
+
1522
+ AutoModelForTokenClassification = auto_class_update(AutoModelForTokenClassification, head_doc="token classification")
1523
+
1524
+
1525
+ class AutoModelForMultipleChoice(_BaseAutoModelClass):
1526
+ _model_mapping = MODEL_FOR_MULTIPLE_CHOICE_MAPPING
1527
+
1528
+
1529
+ AutoModelForMultipleChoice = auto_class_update(AutoModelForMultipleChoice, head_doc="multiple choice")
1530
+
1531
+
1532
+ class AutoModelForNextSentencePrediction(_BaseAutoModelClass):
1533
+ _model_mapping = MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
1534
+
1535
+
1536
+ AutoModelForNextSentencePrediction = auto_class_update(
1537
+ AutoModelForNextSentencePrediction, head_doc="next sentence prediction"
1538
+ )
1539
+
1540
+
1541
+ class AutoModelForImageClassification(_BaseAutoModelClass):
1542
+ _model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
1543
+
1544
+
1545
+ AutoModelForImageClassification = auto_class_update(AutoModelForImageClassification, head_doc="image classification")
1546
+
1547
+
1548
+ class AutoModelForZeroShotImageClassification(_BaseAutoModelClass):
1549
+ _model_mapping = MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
1550
+
1551
+
1552
+ AutoModelForZeroShotImageClassification = auto_class_update(
1553
+ AutoModelForZeroShotImageClassification, head_doc="zero-shot image classification"
1554
+ )
1555
+
1556
+
1557
+ class AutoModelForImageSegmentation(_BaseAutoModelClass):
1558
+ _model_mapping = MODEL_FOR_IMAGE_SEGMENTATION_MAPPING
1559
+
1560
+
1561
+ AutoModelForImageSegmentation = auto_class_update(AutoModelForImageSegmentation, head_doc="image segmentation")
1562
+
1563
+
1564
+ class AutoModelForSemanticSegmentation(_BaseAutoModelClass):
1565
+ _model_mapping = MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
1566
+
1567
+
1568
+ AutoModelForSemanticSegmentation = auto_class_update(
1569
+ AutoModelForSemanticSegmentation, head_doc="semantic segmentation"
1570
+ )
1571
+
1572
+
1573
+ class AutoModelForUniversalSegmentation(_BaseAutoModelClass):
1574
+ _model_mapping = MODEL_FOR_UNIVERSAL_SEGMENTATION_MAPPING
1575
+
1576
+
1577
+ AutoModelForUniversalSegmentation = auto_class_update(
1578
+ AutoModelForUniversalSegmentation, head_doc="universal image segmentation"
1579
+ )
1580
+
1581
+
1582
+ class AutoModelForInstanceSegmentation(_BaseAutoModelClass):
1583
+ _model_mapping = MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING
1584
+
1585
+
1586
+ AutoModelForInstanceSegmentation = auto_class_update(
1587
+ AutoModelForInstanceSegmentation, head_doc="instance segmentation"
1588
+ )
1589
+
1590
+
1591
+ class AutoModelForObjectDetection(_BaseAutoModelClass):
1592
+ _model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
1593
+
1594
+
1595
+ AutoModelForObjectDetection = auto_class_update(AutoModelForObjectDetection, head_doc="object detection")
1596
+
1597
+
1598
+ class AutoModelForZeroShotObjectDetection(_BaseAutoModelClass):
1599
+ _model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
1600
+
1601
+
1602
+ AutoModelForZeroShotObjectDetection = auto_class_update(
1603
+ AutoModelForZeroShotObjectDetection, head_doc="zero-shot object detection"
1604
+ )
1605
+
1606
+
1607
+ class AutoModelForDepthEstimation(_BaseAutoModelClass):
1608
+ _model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
1609
+
1610
+
1611
+ AutoModelForDepthEstimation = auto_class_update(AutoModelForDepthEstimation, head_doc="depth estimation")
1612
+
1613
+
1614
+ class AutoModelForVideoClassification(_BaseAutoModelClass):
1615
+ _model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
1616
+
1617
+
1618
+ AutoModelForVideoClassification = auto_class_update(AutoModelForVideoClassification, head_doc="video classification")
1619
+
1620
+
1621
+ class AutoModelForVision2Seq(_BaseAutoModelClass):
1622
+ _model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING
1623
+
1624
+
1625
+ AutoModelForVision2Seq = auto_class_update(AutoModelForVision2Seq, head_doc="vision-to-text modeling")
1626
+
1627
+
1628
+ class AutoModelForAudioClassification(_BaseAutoModelClass):
1629
+ _model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
1630
+
1631
+
1632
+ AutoModelForAudioClassification = auto_class_update(AutoModelForAudioClassification, head_doc="audio classification")
1633
+
1634
+
1635
+ class AutoModelForCTC(_BaseAutoModelClass):
1636
+ _model_mapping = MODEL_FOR_CTC_MAPPING
1637
+
1638
+
1639
+ AutoModelForCTC = auto_class_update(AutoModelForCTC, head_doc="connectionist temporal classification")
1640
+
1641
+
1642
+ class AutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
1643
+ _model_mapping = MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
1644
+
1645
+
1646
+ AutoModelForSpeechSeq2Seq = auto_class_update(
1647
+ AutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
1648
+ )
1649
+
1650
+
1651
+ class AutoModelForAudioFrameClassification(_BaseAutoModelClass):
1652
+ _model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING
1653
+
1654
+
1655
+ AutoModelForAudioFrameClassification = auto_class_update(
1656
+ AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification"
1657
+ )
1658
+
1659
+
1660
+ class AutoModelForAudioXVector(_BaseAutoModelClass):
1661
+ _model_mapping = MODEL_FOR_AUDIO_XVECTOR_MAPPING
1662
+
1663
+
1664
+ class AutoModelForTextToSpectrogram(_BaseAutoModelClass):
1665
+ _model_mapping = MODEL_FOR_TEXT_TO_SPECTROGRAM_MAPPING
1666
+
1667
+
1668
+ class AutoModelForTextToWaveform(_BaseAutoModelClass):
1669
+ _model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING
1670
+
1671
+
1672
+ class AutoBackbone(_BaseAutoBackboneClass):
1673
+ _model_mapping = MODEL_FOR_BACKBONE_MAPPING
1674
+
1675
+
1676
+ AutoModelForAudioXVector = auto_class_update(AutoModelForAudioXVector, head_doc="audio retrieval via x-vector")
1677
+
1678
+
1679
+ class AutoModelForMaskedImageModeling(_BaseAutoModelClass):
1680
+ _model_mapping = MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
1681
+
1682
+
1683
+ AutoModelForMaskedImageModeling = auto_class_update(AutoModelForMaskedImageModeling, head_doc="masked image modeling")
1684
+
1685
+
1686
+ class AutoModelWithLMHead(_AutoModelWithLMHead):
1687
+ @classmethod
1688
+ def from_config(cls, config):
1689
+ warnings.warn(
1690
+ "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
1691
+ "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
1692
+ "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
1693
+ FutureWarning,
1694
+ )
1695
+ return super().from_config(config)
1696
+
1697
+ @classmethod
1698
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
1699
+ warnings.warn(
1700
+ "The class `AutoModelWithLMHead` is deprecated and will be removed in a future version. Please use "
1701
+ "`AutoModelForCausalLM` for causal language models, `AutoModelForMaskedLM` for masked language models and "
1702
+ "`AutoModelForSeq2SeqLM` for encoder-decoder models.",
1703
+ FutureWarning,
1704
+ )
1705
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
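For reference, a minimal usage sketch of the auto classes defined in the modeling_auto.py diff above. It relies only on the standard public transformers API; "gpt2" is simply one example checkpoint whose model type appears in the mappings.

# Illustrative usage sketch, not part of the diffed file.
from transformers import AutoConfig, AutoModelForCausalLM

# The config's model_type ("gpt2") is looked up through CONFIG_MAPPING_NAMES and
# MODEL_FOR_CAUSAL_LM_MAPPING, which lazily resolves it to GPT2LMHeadModel.
config = AutoConfig.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_config(config)

# Equivalent one-step loading that also fetches the pretrained weights.
model = AutoModelForCausalLM.from_pretrained("gpt2")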
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_flax_auto.py ADDED
@@ -0,0 +1,382 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Model class."""
16
+
17
+
18
+ from collections import OrderedDict
19
+
20
+ from ...utils import logging
21
+ from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
22
+ from .configuration_auto import CONFIG_MAPPING_NAMES
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ FLAX_MODEL_MAPPING_NAMES = OrderedDict(
29
+ [
30
+ # Base model mapping
31
+ ("albert", "FlaxAlbertModel"),
32
+ ("bart", "FlaxBartModel"),
33
+ ("beit", "FlaxBeitModel"),
34
+ ("bert", "FlaxBertModel"),
35
+ ("big_bird", "FlaxBigBirdModel"),
36
+ ("blenderbot", "FlaxBlenderbotModel"),
37
+ ("blenderbot-small", "FlaxBlenderbotSmallModel"),
38
+ ("bloom", "FlaxBloomModel"),
39
+ ("clip", "FlaxCLIPModel"),
40
+ ("distilbert", "FlaxDistilBertModel"),
41
+ ("electra", "FlaxElectraModel"),
42
+ ("gemma", "FlaxGemmaModel"),
43
+ ("gpt-sw3", "FlaxGPT2Model"),
44
+ ("gpt2", "FlaxGPT2Model"),
45
+ ("gpt_neo", "FlaxGPTNeoModel"),
46
+ ("gptj", "FlaxGPTJModel"),
47
+ ("llama", "FlaxLlamaModel"),
48
+ ("longt5", "FlaxLongT5Model"),
49
+ ("marian", "FlaxMarianModel"),
50
+ ("mbart", "FlaxMBartModel"),
51
+ ("mistral", "FlaxMistralModel"),
52
+ ("mt5", "FlaxMT5Model"),
53
+ ("opt", "FlaxOPTModel"),
54
+ ("pegasus", "FlaxPegasusModel"),
55
+ ("regnet", "FlaxRegNetModel"),
56
+ ("resnet", "FlaxResNetModel"),
57
+ ("roberta", "FlaxRobertaModel"),
58
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormModel"),
59
+ ("roformer", "FlaxRoFormerModel"),
60
+ ("t5", "FlaxT5Model"),
61
+ ("vision-text-dual-encoder", "FlaxVisionTextDualEncoderModel"),
62
+ ("vit", "FlaxViTModel"),
63
+ ("wav2vec2", "FlaxWav2Vec2Model"),
64
+ ("whisper", "FlaxWhisperModel"),
65
+ ("xglm", "FlaxXGLMModel"),
66
+ ("xlm-roberta", "FlaxXLMRobertaModel"),
67
+ ]
68
+ )
69
+
70
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
71
+ [
72
+ # Model for pre-training mapping
73
+ ("albert", "FlaxAlbertForPreTraining"),
74
+ ("bart", "FlaxBartForConditionalGeneration"),
75
+ ("bert", "FlaxBertForPreTraining"),
76
+ ("big_bird", "FlaxBigBirdForPreTraining"),
77
+ ("electra", "FlaxElectraForPreTraining"),
78
+ ("longt5", "FlaxLongT5ForConditionalGeneration"),
79
+ ("mbart", "FlaxMBartForConditionalGeneration"),
80
+ ("mt5", "FlaxMT5ForConditionalGeneration"),
81
+ ("roberta", "FlaxRobertaForMaskedLM"),
82
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
83
+ ("roformer", "FlaxRoFormerForMaskedLM"),
84
+ ("t5", "FlaxT5ForConditionalGeneration"),
85
+ ("wav2vec2", "FlaxWav2Vec2ForPreTraining"),
86
+ ("whisper", "FlaxWhisperForConditionalGeneration"),
87
+ ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
88
+ ]
89
+ )
90
+
91
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
92
+ [
93
+ # Model for Masked LM mapping
94
+ ("albert", "FlaxAlbertForMaskedLM"),
95
+ ("bart", "FlaxBartForConditionalGeneration"),
96
+ ("bert", "FlaxBertForMaskedLM"),
97
+ ("big_bird", "FlaxBigBirdForMaskedLM"),
98
+ ("distilbert", "FlaxDistilBertForMaskedLM"),
99
+ ("electra", "FlaxElectraForMaskedLM"),
100
+ ("mbart", "FlaxMBartForConditionalGeneration"),
101
+ ("roberta", "FlaxRobertaForMaskedLM"),
102
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMaskedLM"),
103
+ ("roformer", "FlaxRoFormerForMaskedLM"),
104
+ ("xlm-roberta", "FlaxXLMRobertaForMaskedLM"),
105
+ ]
106
+ )
107
+
108
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
109
+ [
110
+ # Model for Seq2Seq Causal LM mapping
111
+ ("bart", "FlaxBartForConditionalGeneration"),
112
+ ("blenderbot", "FlaxBlenderbotForConditionalGeneration"),
113
+ ("blenderbot-small", "FlaxBlenderbotSmallForConditionalGeneration"),
114
+ ("encoder-decoder", "FlaxEncoderDecoderModel"),
115
+ ("longt5", "FlaxLongT5ForConditionalGeneration"),
116
+ ("marian", "FlaxMarianMTModel"),
117
+ ("mbart", "FlaxMBartForConditionalGeneration"),
118
+ ("mt5", "FlaxMT5ForConditionalGeneration"),
119
+ ("pegasus", "FlaxPegasusForConditionalGeneration"),
120
+ ("t5", "FlaxT5ForConditionalGeneration"),
121
+ ]
122
+ )
123
+
124
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
125
+ [
126
+ # Model for Image-classsification
127
+ ("beit", "FlaxBeitForImageClassification"),
128
+ ("regnet", "FlaxRegNetForImageClassification"),
129
+ ("resnet", "FlaxResNetForImageClassification"),
130
+ ("vit", "FlaxViTForImageClassification"),
131
+ ]
132
+ )
133
+
134
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
135
+ [
136
+ ("vision-encoder-decoder", "FlaxVisionEncoderDecoderModel"),
137
+ ]
138
+ )
139
+
140
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
141
+ [
142
+ # Model for Causal LM mapping
143
+ ("bart", "FlaxBartForCausalLM"),
144
+ ("bert", "FlaxBertForCausalLM"),
145
+ ("big_bird", "FlaxBigBirdForCausalLM"),
146
+ ("bloom", "FlaxBloomForCausalLM"),
147
+ ("electra", "FlaxElectraForCausalLM"),
148
+ ("gemma", "FlaxGemmaForCausalLM"),
149
+ ("gpt-sw3", "FlaxGPT2LMHeadModel"),
150
+ ("gpt2", "FlaxGPT2LMHeadModel"),
151
+ ("gpt_neo", "FlaxGPTNeoForCausalLM"),
152
+ ("gptj", "FlaxGPTJForCausalLM"),
153
+ ("llama", "FlaxLlamaForCausalLM"),
154
+ ("mistral", "FlaxMistralForCausalLM"),
155
+ ("opt", "FlaxOPTForCausalLM"),
156
+ ("roberta", "FlaxRobertaForCausalLM"),
157
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForCausalLM"),
158
+ ("xglm", "FlaxXGLMForCausalLM"),
159
+ ("xlm-roberta", "FlaxXLMRobertaForCausalLM"),
160
+ ]
161
+ )
162
+
163
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
164
+ [
165
+ # Model for Sequence Classification mapping
166
+ ("albert", "FlaxAlbertForSequenceClassification"),
167
+ ("bart", "FlaxBartForSequenceClassification"),
168
+ ("bert", "FlaxBertForSequenceClassification"),
169
+ ("big_bird", "FlaxBigBirdForSequenceClassification"),
170
+ ("distilbert", "FlaxDistilBertForSequenceClassification"),
171
+ ("electra", "FlaxElectraForSequenceClassification"),
172
+ ("mbart", "FlaxMBartForSequenceClassification"),
173
+ ("roberta", "FlaxRobertaForSequenceClassification"),
174
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForSequenceClassification"),
175
+ ("roformer", "FlaxRoFormerForSequenceClassification"),
176
+ ("xlm-roberta", "FlaxXLMRobertaForSequenceClassification"),
177
+ ]
178
+ )
179
+
180
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
181
+ [
182
+ # Model for Question Answering mapping
183
+ ("albert", "FlaxAlbertForQuestionAnswering"),
184
+ ("bart", "FlaxBartForQuestionAnswering"),
185
+ ("bert", "FlaxBertForQuestionAnswering"),
186
+ ("big_bird", "FlaxBigBirdForQuestionAnswering"),
187
+ ("distilbert", "FlaxDistilBertForQuestionAnswering"),
188
+ ("electra", "FlaxElectraForQuestionAnswering"),
189
+ ("mbart", "FlaxMBartForQuestionAnswering"),
190
+ ("roberta", "FlaxRobertaForQuestionAnswering"),
191
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForQuestionAnswering"),
192
+ ("roformer", "FlaxRoFormerForQuestionAnswering"),
193
+ ("xlm-roberta", "FlaxXLMRobertaForQuestionAnswering"),
194
+ ]
195
+ )
196
+
197
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
198
+ [
199
+ # Model for Token Classification mapping
200
+ ("albert", "FlaxAlbertForTokenClassification"),
201
+ ("bert", "FlaxBertForTokenClassification"),
202
+ ("big_bird", "FlaxBigBirdForTokenClassification"),
203
+ ("distilbert", "FlaxDistilBertForTokenClassification"),
204
+ ("electra", "FlaxElectraForTokenClassification"),
205
+ ("roberta", "FlaxRobertaForTokenClassification"),
206
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForTokenClassification"),
207
+ ("roformer", "FlaxRoFormerForTokenClassification"),
208
+ ("xlm-roberta", "FlaxXLMRobertaForTokenClassification"),
209
+ ]
210
+ )
211
+
212
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
213
+ [
214
+ # Model for Multiple Choice mapping
215
+ ("albert", "FlaxAlbertForMultipleChoice"),
216
+ ("bert", "FlaxBertForMultipleChoice"),
217
+ ("big_bird", "FlaxBigBirdForMultipleChoice"),
218
+ ("distilbert", "FlaxDistilBertForMultipleChoice"),
219
+ ("electra", "FlaxElectraForMultipleChoice"),
220
+ ("roberta", "FlaxRobertaForMultipleChoice"),
221
+ ("roberta-prelayernorm", "FlaxRobertaPreLayerNormForMultipleChoice"),
222
+ ("roformer", "FlaxRoFormerForMultipleChoice"),
223
+ ("xlm-roberta", "FlaxXLMRobertaForMultipleChoice"),
224
+ ]
225
+ )
226
+
227
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
228
+ [
229
+ ("bert", "FlaxBertForNextSentencePrediction"),
230
+ ]
231
+ )
232
+
233
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
234
+ [
235
+ ("speech-encoder-decoder", "FlaxSpeechEncoderDecoderModel"),
236
+ ("whisper", "FlaxWhisperForConditionalGeneration"),
237
+ ]
238
+ )
239
+
240
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
241
+ [
242
+ ("whisper", "FlaxWhisperForAudioClassification"),
243
+ ]
244
+ )
245
+
246
+ FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
247
+ FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
248
+ FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
249
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
250
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
251
+ )
252
+ FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
253
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
254
+ )
255
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
256
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
257
+ FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
258
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
259
+ )
260
+ FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
261
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
262
+ )
263
+ FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
264
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
265
+ )
266
+ FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
267
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
268
+ )
269
+ FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
270
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
271
+ )
272
+ FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
273
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
274
+ )
275
+ FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
276
+ CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
277
+ )
278
+
279
+
280
+ class FlaxAutoModel(_BaseAutoModelClass):
281
+ _model_mapping = FLAX_MODEL_MAPPING
282
+
283
+
284
+ FlaxAutoModel = auto_class_update(FlaxAutoModel)
285
+
286
+
287
+ class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
288
+ _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING
289
+
290
+
291
+ FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc="pretraining")
292
+
293
+
294
+ class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
295
+ _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
296
+
297
+
298
+ FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc="causal language modeling")
299
+
300
+
301
+ class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
302
+ _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING
303
+
304
+
305
+ FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="masked language modeling")
306
+
307
+
308
+ class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
309
+ _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
310
+
311
+
312
+ FlaxAutoModelForSeq2SeqLM = auto_class_update(
313
+ FlaxAutoModelForSeq2SeqLM,
314
+ head_doc="sequence-to-sequence language modeling",
315
+ checkpoint_for_example="google-t5/t5-base",
316
+ )
317
+
318
+
319
+ class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
320
+ _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
321
+
322
+
323
+ FlaxAutoModelForSequenceClassification = auto_class_update(
324
+ FlaxAutoModelForSequenceClassification, head_doc="sequence classification"
325
+ )
326
+
327
+
328
+ class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
329
+ _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
330
+
331
+
332
+ FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="question answering")
333
+
334
+
335
+ class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
336
+ _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
337
+
338
+
339
+ FlaxAutoModelForTokenClassification = auto_class_update(
340
+ FlaxAutoModelForTokenClassification, head_doc="token classification"
341
+ )
342
+
343
+
344
+ class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
345
+ _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
346
+
347
+
348
+ FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="multiple choice")
349
+
350
+
351
+ class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
352
+ _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
353
+
354
+
355
+ FlaxAutoModelForNextSentencePrediction = auto_class_update(
356
+ FlaxAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
357
+ )
358
+
359
+
360
+ class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
361
+ _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
362
+
363
+
364
+ FlaxAutoModelForImageClassification = auto_class_update(
365
+ FlaxAutoModelForImageClassification, head_doc="image classification"
366
+ )
367
+
368
+
369
+ class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
370
+ _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
371
+
372
+
373
+ FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc="vision-to-text modeling")
374
+
375
+
376
+ class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
377
+ _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
378
+
379
+
380
+ FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
381
+ FlaxAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
382
+ )
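A minimal usage sketch of the Flax auto classes defined above (a hedged illustration, not part of the diff itself): it assumes a working JAX/Flax installation, and uses `google-t5/t5-base`, the same checkpoint this file names in `checkpoint_for_example`.

```python
# Minimal sketch: resolving a checkpoint to its Flax class via the auto API.
# Assumes jax, flax and transformers are installed.
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base")
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")  # resolves to FlaxT5ForConditionalGeneration

inputs = tokenizer("translate English to German: How are you?", return_tensors="np")
outputs = model.generate(**inputs, max_length=32)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))
```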
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/modeling_tf_auto.py ADDED
@@ -0,0 +1,721 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Model class."""
16
+
17
+
18
+ import warnings
19
+ from collections import OrderedDict
20
+
21
+ from ...utils import logging
22
+ from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
23
+ from .configuration_auto import CONFIG_MAPPING_NAMES
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ TF_MODEL_MAPPING_NAMES = OrderedDict(
30
+ [
31
+ # Base model mapping
32
+ ("albert", "TFAlbertModel"),
33
+ ("bart", "TFBartModel"),
34
+ ("bert", "TFBertModel"),
35
+ ("blenderbot", "TFBlenderbotModel"),
36
+ ("blenderbot-small", "TFBlenderbotSmallModel"),
37
+ ("blip", "TFBlipModel"),
38
+ ("camembert", "TFCamembertModel"),
39
+ ("clip", "TFCLIPModel"),
40
+ ("convbert", "TFConvBertModel"),
41
+ ("convnext", "TFConvNextModel"),
42
+ ("convnextv2", "TFConvNextV2Model"),
43
+ ("ctrl", "TFCTRLModel"),
44
+ ("cvt", "TFCvtModel"),
45
+ ("data2vec-vision", "TFData2VecVisionModel"),
46
+ ("deberta", "TFDebertaModel"),
47
+ ("deberta-v2", "TFDebertaV2Model"),
48
+ ("deit", "TFDeiTModel"),
49
+ ("distilbert", "TFDistilBertModel"),
50
+ ("dpr", "TFDPRQuestionEncoder"),
51
+ ("efficientformer", "TFEfficientFormerModel"),
52
+ ("electra", "TFElectraModel"),
53
+ ("esm", "TFEsmModel"),
54
+ ("flaubert", "TFFlaubertModel"),
55
+ ("funnel", ("TFFunnelModel", "TFFunnelBaseModel")),
56
+ ("gpt-sw3", "TFGPT2Model"),
57
+ ("gpt2", "TFGPT2Model"),
58
+ ("gptj", "TFGPTJModel"),
59
+ ("groupvit", "TFGroupViTModel"),
60
+ ("hubert", "TFHubertModel"),
61
+ ("layoutlm", "TFLayoutLMModel"),
62
+ ("layoutlmv3", "TFLayoutLMv3Model"),
63
+ ("led", "TFLEDModel"),
64
+ ("longformer", "TFLongformerModel"),
65
+ ("lxmert", "TFLxmertModel"),
66
+ ("marian", "TFMarianModel"),
67
+ ("mbart", "TFMBartModel"),
68
+ ("mobilebert", "TFMobileBertModel"),
69
+ ("mobilevit", "TFMobileViTModel"),
70
+ ("mpnet", "TFMPNetModel"),
71
+ ("mt5", "TFMT5Model"),
72
+ ("openai-gpt", "TFOpenAIGPTModel"),
73
+ ("opt", "TFOPTModel"),
74
+ ("pegasus", "TFPegasusModel"),
75
+ ("regnet", "TFRegNetModel"),
76
+ ("rembert", "TFRemBertModel"),
77
+ ("resnet", "TFResNetModel"),
78
+ ("roberta", "TFRobertaModel"),
79
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"),
80
+ ("roformer", "TFRoFormerModel"),
81
+ ("sam", "TFSamModel"),
82
+ ("segformer", "TFSegformerModel"),
83
+ ("speech_to_text", "TFSpeech2TextModel"),
84
+ ("swin", "TFSwinModel"),
85
+ ("t5", "TFT5Model"),
86
+ ("tapas", "TFTapasModel"),
87
+ ("transfo-xl", "TFTransfoXLModel"),
88
+ ("vision-text-dual-encoder", "TFVisionTextDualEncoderModel"),
89
+ ("vit", "TFViTModel"),
90
+ ("vit_mae", "TFViTMAEModel"),
91
+ ("wav2vec2", "TFWav2Vec2Model"),
92
+ ("whisper", "TFWhisperModel"),
93
+ ("xglm", "TFXGLMModel"),
94
+ ("xlm", "TFXLMModel"),
95
+ ("xlm-roberta", "TFXLMRobertaModel"),
96
+ ("xlnet", "TFXLNetModel"),
97
+ ]
98
+ )
99
+
100
+ TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
101
+ [
102
+ # Model for pre-training mapping
103
+ ("albert", "TFAlbertForPreTraining"),
104
+ ("bart", "TFBartForConditionalGeneration"),
105
+ ("bert", "TFBertForPreTraining"),
106
+ ("camembert", "TFCamembertForMaskedLM"),
107
+ ("ctrl", "TFCTRLLMHeadModel"),
108
+ ("distilbert", "TFDistilBertForMaskedLM"),
109
+ ("electra", "TFElectraForPreTraining"),
110
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
111
+ ("funnel", "TFFunnelForPreTraining"),
112
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
113
+ ("gpt2", "TFGPT2LMHeadModel"),
114
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
115
+ ("lxmert", "TFLxmertForPreTraining"),
116
+ ("mobilebert", "TFMobileBertForPreTraining"),
117
+ ("mpnet", "TFMPNetForMaskedLM"),
118
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
119
+ ("roberta", "TFRobertaForMaskedLM"),
120
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
121
+ ("t5", "TFT5ForConditionalGeneration"),
122
+ ("tapas", "TFTapasForMaskedLM"),
123
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
124
+ ("vit_mae", "TFViTMAEForPreTraining"),
125
+ ("xlm", "TFXLMWithLMHeadModel"),
126
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
127
+ ("xlnet", "TFXLNetLMHeadModel"),
128
+ ]
129
+ )
130
+
131
+ TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES = OrderedDict(
132
+ [
133
+ # Model with LM heads mapping
134
+ ("albert", "TFAlbertForMaskedLM"),
135
+ ("bart", "TFBartForConditionalGeneration"),
136
+ ("bert", "TFBertForMaskedLM"),
137
+ ("camembert", "TFCamembertForMaskedLM"),
138
+ ("convbert", "TFConvBertForMaskedLM"),
139
+ ("ctrl", "TFCTRLLMHeadModel"),
140
+ ("distilbert", "TFDistilBertForMaskedLM"),
141
+ ("electra", "TFElectraForMaskedLM"),
142
+ ("esm", "TFEsmForMaskedLM"),
143
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
144
+ ("funnel", "TFFunnelForMaskedLM"),
145
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
146
+ ("gpt2", "TFGPT2LMHeadModel"),
147
+ ("gptj", "TFGPTJForCausalLM"),
148
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
149
+ ("led", "TFLEDForConditionalGeneration"),
150
+ ("longformer", "TFLongformerForMaskedLM"),
151
+ ("marian", "TFMarianMTModel"),
152
+ ("mobilebert", "TFMobileBertForMaskedLM"),
153
+ ("mpnet", "TFMPNetForMaskedLM"),
154
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
155
+ ("rembert", "TFRemBertForMaskedLM"),
156
+ ("roberta", "TFRobertaForMaskedLM"),
157
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
158
+ ("roformer", "TFRoFormerForMaskedLM"),
159
+ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
160
+ ("t5", "TFT5ForConditionalGeneration"),
161
+ ("tapas", "TFTapasForMaskedLM"),
162
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
163
+ ("whisper", "TFWhisperForConditionalGeneration"),
164
+ ("xlm", "TFXLMWithLMHeadModel"),
165
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
166
+ ("xlnet", "TFXLNetLMHeadModel"),
167
+ ]
168
+ )
169
+
170
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
171
+ [
172
+ # Model for Causal LM mapping
173
+ ("bert", "TFBertLMHeadModel"),
174
+ ("camembert", "TFCamembertForCausalLM"),
175
+ ("ctrl", "TFCTRLLMHeadModel"),
176
+ ("gpt-sw3", "TFGPT2LMHeadModel"),
177
+ ("gpt2", "TFGPT2LMHeadModel"),
178
+ ("gptj", "TFGPTJForCausalLM"),
179
+ ("openai-gpt", "TFOpenAIGPTLMHeadModel"),
180
+ ("opt", "TFOPTForCausalLM"),
181
+ ("rembert", "TFRemBertForCausalLM"),
182
+ ("roberta", "TFRobertaForCausalLM"),
183
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForCausalLM"),
184
+ ("roformer", "TFRoFormerForCausalLM"),
185
+ ("transfo-xl", "TFTransfoXLLMHeadModel"),
186
+ ("xglm", "TFXGLMForCausalLM"),
187
+ ("xlm", "TFXLMWithLMHeadModel"),
188
+ ("xlm-roberta", "TFXLMRobertaForCausalLM"),
189
+ ("xlnet", "TFXLNetLMHeadModel"),
190
+ ]
191
+ )
192
+
193
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES = OrderedDict(
194
+ [
195
+ ("deit", "TFDeiTForMaskedImageModeling"),
196
+ ("swin", "TFSwinForMaskedImageModeling"),
197
+ ]
198
+ )
199
+
200
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
201
+ [
202
+ # Model for Image classification
203
+ ("convnext", "TFConvNextForImageClassification"),
204
+ ("convnextv2", "TFConvNextV2ForImageClassification"),
205
+ ("cvt", "TFCvtForImageClassification"),
206
+ ("data2vec-vision", "TFData2VecVisionForImageClassification"),
207
+ ("deit", ("TFDeiTForImageClassification", "TFDeiTForImageClassificationWithTeacher")),
208
+ (
209
+ "efficientformer",
210
+ ("TFEfficientFormerForImageClassification", "TFEfficientFormerForImageClassificationWithTeacher"),
211
+ ),
212
+ ("mobilevit", "TFMobileViTForImageClassification"),
213
+ ("regnet", "TFRegNetForImageClassification"),
214
+ ("resnet", "TFResNetForImageClassification"),
215
+ ("segformer", "TFSegformerForImageClassification"),
216
+ ("swin", "TFSwinForImageClassification"),
217
+ ("vit", "TFViTForImageClassification"),
218
+ ]
219
+ )
220
+
221
+
222
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
223
+ [
224
+ # Model for Zero Shot Image Classification mapping
225
+ ("blip", "TFBlipModel"),
226
+ ("clip", "TFCLIPModel"),
227
+ ]
228
+ )
229
+
230
+
231
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES = OrderedDict(
232
+ [
233
+ # Model for Semantic Segmentation mapping
234
+ ("data2vec-vision", "TFData2VecVisionForSemanticSegmentation"),
235
+ ("mobilevit", "TFMobileViTForSemanticSegmentation"),
236
+ ("segformer", "TFSegformerForSemanticSegmentation"),
237
+ ]
238
+ )
239
+
240
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
241
+ [
242
+ ("blip", "TFBlipForConditionalGeneration"),
243
+ ("vision-encoder-decoder", "TFVisionEncoderDecoderModel"),
244
+ ]
245
+ )
246
+
247
+ TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
248
+ [
249
+ # Model for Masked LM mapping
250
+ ("albert", "TFAlbertForMaskedLM"),
251
+ ("bert", "TFBertForMaskedLM"),
252
+ ("camembert", "TFCamembertForMaskedLM"),
253
+ ("convbert", "TFConvBertForMaskedLM"),
254
+ ("deberta", "TFDebertaForMaskedLM"),
255
+ ("deberta-v2", "TFDebertaV2ForMaskedLM"),
256
+ ("distilbert", "TFDistilBertForMaskedLM"),
257
+ ("electra", "TFElectraForMaskedLM"),
258
+ ("esm", "TFEsmForMaskedLM"),
259
+ ("flaubert", "TFFlaubertWithLMHeadModel"),
260
+ ("funnel", "TFFunnelForMaskedLM"),
261
+ ("layoutlm", "TFLayoutLMForMaskedLM"),
262
+ ("longformer", "TFLongformerForMaskedLM"),
263
+ ("mobilebert", "TFMobileBertForMaskedLM"),
264
+ ("mpnet", "TFMPNetForMaskedLM"),
265
+ ("rembert", "TFRemBertForMaskedLM"),
266
+ ("roberta", "TFRobertaForMaskedLM"),
267
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMaskedLM"),
268
+ ("roformer", "TFRoFormerForMaskedLM"),
269
+ ("tapas", "TFTapasForMaskedLM"),
270
+ ("xlm", "TFXLMWithLMHeadModel"),
271
+ ("xlm-roberta", "TFXLMRobertaForMaskedLM"),
272
+ ]
273
+ )
274
+
275
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
276
+ [
277
+ # Model for Seq2Seq Causal LM mapping
278
+ ("bart", "TFBartForConditionalGeneration"),
279
+ ("blenderbot", "TFBlenderbotForConditionalGeneration"),
280
+ ("blenderbot-small", "TFBlenderbotSmallForConditionalGeneration"),
281
+ ("encoder-decoder", "TFEncoderDecoderModel"),
282
+ ("led", "TFLEDForConditionalGeneration"),
283
+ ("marian", "TFMarianMTModel"),
284
+ ("mbart", "TFMBartForConditionalGeneration"),
285
+ ("mt5", "TFMT5ForConditionalGeneration"),
286
+ ("pegasus", "TFPegasusForConditionalGeneration"),
287
+ ("t5", "TFT5ForConditionalGeneration"),
288
+ ]
289
+ )
290
+
291
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
292
+ [
293
+ ("speech_to_text", "TFSpeech2TextForConditionalGeneration"),
294
+ ("whisper", "TFWhisperForConditionalGeneration"),
295
+ ]
296
+ )
297
+
298
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
299
+ [
300
+ # Model for Sequence Classification mapping
301
+ ("albert", "TFAlbertForSequenceClassification"),
302
+ ("bart", "TFBartForSequenceClassification"),
303
+ ("bert", "TFBertForSequenceClassification"),
304
+ ("camembert", "TFCamembertForSequenceClassification"),
305
+ ("convbert", "TFConvBertForSequenceClassification"),
306
+ ("ctrl", "TFCTRLForSequenceClassification"),
307
+ ("deberta", "TFDebertaForSequenceClassification"),
308
+ ("deberta-v2", "TFDebertaV2ForSequenceClassification"),
309
+ ("distilbert", "TFDistilBertForSequenceClassification"),
310
+ ("electra", "TFElectraForSequenceClassification"),
311
+ ("esm", "TFEsmForSequenceClassification"),
312
+ ("flaubert", "TFFlaubertForSequenceClassification"),
313
+ ("funnel", "TFFunnelForSequenceClassification"),
314
+ ("gpt-sw3", "TFGPT2ForSequenceClassification"),
315
+ ("gpt2", "TFGPT2ForSequenceClassification"),
316
+ ("gptj", "TFGPTJForSequenceClassification"),
317
+ ("layoutlm", "TFLayoutLMForSequenceClassification"),
318
+ ("layoutlmv3", "TFLayoutLMv3ForSequenceClassification"),
319
+ ("longformer", "TFLongformerForSequenceClassification"),
320
+ ("mobilebert", "TFMobileBertForSequenceClassification"),
321
+ ("mpnet", "TFMPNetForSequenceClassification"),
322
+ ("openai-gpt", "TFOpenAIGPTForSequenceClassification"),
323
+ ("rembert", "TFRemBertForSequenceClassification"),
324
+ ("roberta", "TFRobertaForSequenceClassification"),
325
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForSequenceClassification"),
326
+ ("roformer", "TFRoFormerForSequenceClassification"),
327
+ ("tapas", "TFTapasForSequenceClassification"),
328
+ ("transfo-xl", "TFTransfoXLForSequenceClassification"),
329
+ ("xlm", "TFXLMForSequenceClassification"),
330
+ ("xlm-roberta", "TFXLMRobertaForSequenceClassification"),
331
+ ("xlnet", "TFXLNetForSequenceClassification"),
332
+ ]
333
+ )
334
+
335
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
336
+ [
337
+ # Model for Question Answering mapping
338
+ ("albert", "TFAlbertForQuestionAnswering"),
339
+ ("bert", "TFBertForQuestionAnswering"),
340
+ ("camembert", "TFCamembertForQuestionAnswering"),
341
+ ("convbert", "TFConvBertForQuestionAnswering"),
342
+ ("deberta", "TFDebertaForQuestionAnswering"),
343
+ ("deberta-v2", "TFDebertaV2ForQuestionAnswering"),
344
+ ("distilbert", "TFDistilBertForQuestionAnswering"),
345
+ ("electra", "TFElectraForQuestionAnswering"),
346
+ ("flaubert", "TFFlaubertForQuestionAnsweringSimple"),
347
+ ("funnel", "TFFunnelForQuestionAnswering"),
348
+ ("gptj", "TFGPTJForQuestionAnswering"),
349
+ ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"),
350
+ ("longformer", "TFLongformerForQuestionAnswering"),
351
+ ("mobilebert", "TFMobileBertForQuestionAnswering"),
352
+ ("mpnet", "TFMPNetForQuestionAnswering"),
353
+ ("rembert", "TFRemBertForQuestionAnswering"),
354
+ ("roberta", "TFRobertaForQuestionAnswering"),
355
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForQuestionAnswering"),
356
+ ("roformer", "TFRoFormerForQuestionAnswering"),
357
+ ("xlm", "TFXLMForQuestionAnsweringSimple"),
358
+ ("xlm-roberta", "TFXLMRobertaForQuestionAnswering"),
359
+ ("xlnet", "TFXLNetForQuestionAnsweringSimple"),
360
+ ]
361
+ )
362
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict([("wav2vec2", "TFWav2Vec2ForSequenceClassification")])
363
+
364
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
365
+ [
366
+ ("layoutlm", "TFLayoutLMForQuestionAnswering"),
367
+ ("layoutlmv3", "TFLayoutLMv3ForQuestionAnswering"),
368
+ ]
369
+ )
370
+
371
+
372
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
373
+ [
374
+ # Model for Table Question Answering mapping
375
+ ("tapas", "TFTapasForQuestionAnswering"),
376
+ ]
377
+ )
378
+
379
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
380
+ [
381
+ # Model for Token Classification mapping
382
+ ("albert", "TFAlbertForTokenClassification"),
383
+ ("bert", "TFBertForTokenClassification"),
384
+ ("camembert", "TFCamembertForTokenClassification"),
385
+ ("convbert", "TFConvBertForTokenClassification"),
386
+ ("deberta", "TFDebertaForTokenClassification"),
387
+ ("deberta-v2", "TFDebertaV2ForTokenClassification"),
388
+ ("distilbert", "TFDistilBertForTokenClassification"),
389
+ ("electra", "TFElectraForTokenClassification"),
390
+ ("esm", "TFEsmForTokenClassification"),
391
+ ("flaubert", "TFFlaubertForTokenClassification"),
392
+ ("funnel", "TFFunnelForTokenClassification"),
393
+ ("layoutlm", "TFLayoutLMForTokenClassification"),
394
+ ("layoutlmv3", "TFLayoutLMv3ForTokenClassification"),
395
+ ("longformer", "TFLongformerForTokenClassification"),
396
+ ("mobilebert", "TFMobileBertForTokenClassification"),
397
+ ("mpnet", "TFMPNetForTokenClassification"),
398
+ ("rembert", "TFRemBertForTokenClassification"),
399
+ ("roberta", "TFRobertaForTokenClassification"),
400
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForTokenClassification"),
401
+ ("roformer", "TFRoFormerForTokenClassification"),
402
+ ("xlm", "TFXLMForTokenClassification"),
403
+ ("xlm-roberta", "TFXLMRobertaForTokenClassification"),
404
+ ("xlnet", "TFXLNetForTokenClassification"),
405
+ ]
406
+ )
407
+
408
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
409
+ [
410
+ # Model for Multiple Choice mapping
411
+ ("albert", "TFAlbertForMultipleChoice"),
412
+ ("bert", "TFBertForMultipleChoice"),
413
+ ("camembert", "TFCamembertForMultipleChoice"),
414
+ ("convbert", "TFConvBertForMultipleChoice"),
415
+ ("deberta-v2", "TFDebertaV2ForMultipleChoice"),
416
+ ("distilbert", "TFDistilBertForMultipleChoice"),
417
+ ("electra", "TFElectraForMultipleChoice"),
418
+ ("flaubert", "TFFlaubertForMultipleChoice"),
419
+ ("funnel", "TFFunnelForMultipleChoice"),
420
+ ("longformer", "TFLongformerForMultipleChoice"),
421
+ ("mobilebert", "TFMobileBertForMultipleChoice"),
422
+ ("mpnet", "TFMPNetForMultipleChoice"),
423
+ ("rembert", "TFRemBertForMultipleChoice"),
424
+ ("roberta", "TFRobertaForMultipleChoice"),
425
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormForMultipleChoice"),
426
+ ("roformer", "TFRoFormerForMultipleChoice"),
427
+ ("xlm", "TFXLMForMultipleChoice"),
428
+ ("xlm-roberta", "TFXLMRobertaForMultipleChoice"),
429
+ ("xlnet", "TFXLNetForMultipleChoice"),
430
+ ]
431
+ )
432
+
433
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
434
+ [
435
+ ("bert", "TFBertForNextSentencePrediction"),
436
+ ("mobilebert", "TFMobileBertForNextSentencePrediction"),
437
+ ]
438
+ )
439
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES = OrderedDict(
440
+ [
441
+ ("sam", "TFSamModel"),
442
+ ]
443
+ )
444
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES = OrderedDict(
445
+ [
446
+ ("albert", "TFAlbertModel"),
447
+ ("bert", "TFBertModel"),
448
+ ("convbert", "TFConvBertModel"),
449
+ ("deberta", "TFDebertaModel"),
450
+ ("deberta-v2", "TFDebertaV2Model"),
451
+ ("distilbert", "TFDistilBertModel"),
452
+ ("electra", "TFElectraModel"),
453
+ ("flaubert", "TFFlaubertModel"),
454
+ ("longformer", "TFLongformerModel"),
455
+ ("mobilebert", "TFMobileBertModel"),
456
+ ("mt5", "TFMT5EncoderModel"),
457
+ ("rembert", "TFRemBertModel"),
458
+ ("roberta", "TFRobertaModel"),
459
+ ("roberta-prelayernorm", "TFRobertaPreLayerNormModel"),
460
+ ("roformer", "TFRoFormerModel"),
461
+ ("t5", "TFT5EncoderModel"),
462
+ ("xlm", "TFXLMModel"),
463
+ ("xlm-roberta", "TFXLMRobertaModel"),
464
+ ]
465
+ )
466
+
467
+ TF_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_MAPPING_NAMES)
468
+ TF_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
469
+ TF_MODEL_WITH_LM_HEAD_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_WITH_LM_HEAD_MAPPING_NAMES)
470
+ TF_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
471
+ TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING = _LazyAutoMapping(
472
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING_NAMES
473
+ )
474
+ TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
475
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
476
+ )
477
+ TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
478
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES
479
+ )
480
+ TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING = _LazyAutoMapping(
481
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES
482
+ )
483
+ TF_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
484
+ TF_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
485
+ TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
486
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
487
+ )
488
+ TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
489
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
490
+ )
491
+ TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
492
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
493
+ )
494
+ TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
495
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
496
+ )
497
+ TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
498
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES
499
+ )
500
+ TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
501
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES
502
+ )
503
+ TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
504
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
505
+ )
506
+ TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
507
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
508
+ )
509
+ TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
510
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
511
+ )
512
+ TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
513
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
514
+ )
515
+
516
+ TF_MODEL_FOR_MASK_GENERATION_MAPPING = _LazyAutoMapping(
517
+ CONFIG_MAPPING_NAMES, TF_MODEL_FOR_MASK_GENERATION_MAPPING_NAMES
518
+ )
519
+
520
+ TF_MODEL_FOR_TEXT_ENCODING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TF_MODEL_FOR_TEXT_ENCODING_MAPPING_NAMES)
521
+
522
+
523
+ class TFAutoModelForMaskGeneration(_BaseAutoModelClass):
524
+ _model_mapping = TF_MODEL_FOR_MASK_GENERATION_MAPPING
525
+
526
+
527
+ class TFAutoModelForTextEncoding(_BaseAutoModelClass):
528
+ _model_mapping = TF_MODEL_FOR_TEXT_ENCODING_MAPPING
529
+
530
+
531
+ class TFAutoModel(_BaseAutoModelClass):
532
+ _model_mapping = TF_MODEL_MAPPING
533
+
534
+
535
+ TFAutoModel = auto_class_update(TFAutoModel)
536
+
537
+
538
+ class TFAutoModelForAudioClassification(_BaseAutoModelClass):
539
+ _model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
540
+
541
+
542
+ TFAutoModelForAudioClassification = auto_class_update(
543
+ TFAutoModelForAudioClassification, head_doc="audio classification"
544
+ )
545
+
546
+
547
+ class TFAutoModelForPreTraining(_BaseAutoModelClass):
548
+ _model_mapping = TF_MODEL_FOR_PRETRAINING_MAPPING
549
+
550
+
551
+ TFAutoModelForPreTraining = auto_class_update(TFAutoModelForPreTraining, head_doc="pretraining")
552
+
553
+
554
+ # Private on purpose; the public class will add the deprecation warnings.
555
+ class _TFAutoModelWithLMHead(_BaseAutoModelClass):
556
+ _model_mapping = TF_MODEL_WITH_LM_HEAD_MAPPING
557
+
558
+
559
+ _TFAutoModelWithLMHead = auto_class_update(_TFAutoModelWithLMHead, head_doc="language modeling")
560
+
561
+
562
+ class TFAutoModelForCausalLM(_BaseAutoModelClass):
563
+ _model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
564
+
565
+
566
+ TFAutoModelForCausalLM = auto_class_update(TFAutoModelForCausalLM, head_doc="causal language modeling")
567
+
568
+
569
+ class TFAutoModelForMaskedImageModeling(_BaseAutoModelClass):
570
+ _model_mapping = TF_MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING
571
+
572
+
573
+ TFAutoModelForMaskedImageModeling = auto_class_update(
574
+ TFAutoModelForMaskedImageModeling, head_doc="masked image modeling"
575
+ )
576
+
577
+
578
+ class TFAutoModelForImageClassification(_BaseAutoModelClass):
579
+ _model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
580
+
581
+
582
+ TFAutoModelForImageClassification = auto_class_update(
583
+ TFAutoModelForImageClassification, head_doc="image classification"
584
+ )
585
+
586
+
587
+ class TFAutoModelForZeroShotImageClassification(_BaseAutoModelClass):
588
+ _model_mapping = TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
589
+
590
+
591
+ TFAutoModelForZeroShotImageClassification = auto_class_update(
592
+ TFAutoModelForZeroShotImageClassification, head_doc="zero-shot image classification"
593
+ )
594
+
595
+
596
+ class TFAutoModelForSemanticSegmentation(_BaseAutoModelClass):
597
+ _model_mapping = TF_MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING
598
+
599
+
600
+ TFAutoModelForSemanticSegmentation = auto_class_update(
601
+ TFAutoModelForSemanticSegmentation, head_doc="semantic segmentation"
602
+ )
603
+
604
+
605
+ class TFAutoModelForVision2Seq(_BaseAutoModelClass):
606
+ _model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING
607
+
608
+
609
+ TFAutoModelForVision2Seq = auto_class_update(TFAutoModelForVision2Seq, head_doc="vision-to-text modeling")
610
+
611
+
612
+ class TFAutoModelForMaskedLM(_BaseAutoModelClass):
613
+ _model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
614
+
615
+
616
+ TFAutoModelForMaskedLM = auto_class_update(TFAutoModelForMaskedLM, head_doc="masked language modeling")
617
+
618
+
619
+ class TFAutoModelForSeq2SeqLM(_BaseAutoModelClass):
620
+ _model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
621
+
622
+
623
+ TFAutoModelForSeq2SeqLM = auto_class_update(
624
+ TFAutoModelForSeq2SeqLM,
625
+ head_doc="sequence-to-sequence language modeling",
626
+ checkpoint_for_example="google-t5/t5-base",
627
+ )
628
+
629
+
630
+ class TFAutoModelForSequenceClassification(_BaseAutoModelClass):
631
+ _model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
632
+
633
+
634
+ TFAutoModelForSequenceClassification = auto_class_update(
635
+ TFAutoModelForSequenceClassification, head_doc="sequence classification"
636
+ )
637
+
638
+
639
+ class TFAutoModelForQuestionAnswering(_BaseAutoModelClass):
640
+ _model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING
641
+
642
+
643
+ TFAutoModelForQuestionAnswering = auto_class_update(TFAutoModelForQuestionAnswering, head_doc="question answering")
644
+
645
+
646
+ class TFAutoModelForDocumentQuestionAnswering(_BaseAutoModelClass):
647
+ _model_mapping = TF_MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
648
+
649
+
650
+ TFAutoModelForDocumentQuestionAnswering = auto_class_update(
651
+ TFAutoModelForDocumentQuestionAnswering,
652
+ head_doc="document question answering",
653
+ checkpoint_for_example='impira/layoutlm-document-qa", revision="52e01b3',
654
+ )
655
+
656
+
657
+ class TFAutoModelForTableQuestionAnswering(_BaseAutoModelClass):
658
+ _model_mapping = TF_MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING
659
+
660
+
661
+ TFAutoModelForTableQuestionAnswering = auto_class_update(
662
+ TFAutoModelForTableQuestionAnswering,
663
+ head_doc="table question answering",
664
+ checkpoint_for_example="google/tapas-base-finetuned-wtq",
665
+ )
666
+
667
+
668
+ class TFAutoModelForTokenClassification(_BaseAutoModelClass):
669
+ _model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
670
+
671
+
672
+ TFAutoModelForTokenClassification = auto_class_update(
673
+ TFAutoModelForTokenClassification, head_doc="token classification"
674
+ )
675
+
676
+
677
+ class TFAutoModelForMultipleChoice(_BaseAutoModelClass):
678
+ _model_mapping = TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
679
+
680
+
681
+ TFAutoModelForMultipleChoice = auto_class_update(TFAutoModelForMultipleChoice, head_doc="multiple choice")
682
+
683
+
684
+ class TFAutoModelForNextSentencePrediction(_BaseAutoModelClass):
685
+ _model_mapping = TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
686
+
687
+
688
+ TFAutoModelForNextSentencePrediction = auto_class_update(
689
+ TFAutoModelForNextSentencePrediction, head_doc="next sentence prediction"
690
+ )
691
+
692
+
693
+ class TFAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
694
+ _model_mapping = TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
695
+
696
+
697
+ TFAutoModelForSpeechSeq2Seq = auto_class_update(
698
+ TFAutoModelForSpeechSeq2Seq, head_doc="sequence-to-sequence speech-to-text modeling"
699
+ )
700
+
701
+
702
+ class TFAutoModelWithLMHead(_TFAutoModelWithLMHead):
703
+ @classmethod
704
+ def from_config(cls, config):
705
+ warnings.warn(
706
+ "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use"
707
+ " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models"
708
+ " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.",
709
+ FutureWarning,
710
+ )
711
+ return super().from_config(config)
712
+
713
+ @classmethod
714
+ def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
715
+ warnings.warn(
716
+ "The class `TFAutoModelWithLMHead` is deprecated and will be removed in a future version. Please use"
717
+ " `TFAutoModelForCausalLM` for causal language models, `TFAutoModelForMaskedLM` for masked language models"
718
+ " and `TFAutoModelForSeq2SeqLM` for encoder-decoder models.",
719
+ FutureWarning,
720
+ )
721
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
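The deprecation warnings above steer callers from the legacy `TFAutoModelWithLMHead` to the task-specific classes. A hedged migration sketch follows; the checkpoint names are illustrative choices, not mandated by this file, and each needs TF weights to be available.

```python
# Sketch of the migration suggested by the deprecation warning above.
# Checkpoint names are illustrative; any checkpoint with TF weights works.
from transformers import (
    TFAutoModelForCausalLM,
    TFAutoModelForMaskedLM,
    TFAutoModelForSeq2SeqLM,
)

causal_lm = TFAutoModelForCausalLM.from_pretrained("gpt2")                 # decoder-only LM
masked_lm = TFAutoModelForMaskedLM.from_pretrained("bert-base-uncased")    # masked LM
seq2seq_lm = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base")  # encoder-decoder LM
```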
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/processing_auto.py ADDED
@@ -0,0 +1,358 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ AutoProcessor class."""
16
+ import importlib
17
+ import inspect
18
+ import json
19
+ import os
20
+ import warnings
21
+ from collections import OrderedDict
22
+
23
+ # Build the list of all feature extractors
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...feature_extraction_utils import FeatureExtractionMixin
27
+ from ...image_processing_utils import ImageProcessingMixin
28
+ from ...processing_utils import ProcessorMixin
29
+ from ...tokenization_utils import TOKENIZER_CONFIG_FILE
30
+ from ...utils import FEATURE_EXTRACTOR_NAME, PROCESSOR_NAME, get_file_from_repo, logging
31
+ from .auto_factory import _LazyAutoMapping
32
+ from .configuration_auto import (
33
+ CONFIG_MAPPING_NAMES,
34
+ AutoConfig,
35
+ model_type_to_module_name,
36
+ replace_list_option_in_docstrings,
37
+ )
38
+ from .feature_extraction_auto import AutoFeatureExtractor
39
+ from .image_processing_auto import AutoImageProcessor
40
+ from .tokenization_auto import AutoTokenizer
41
+
42
+
43
+ logger = logging.get_logger(__name__)
44
+
45
+ PROCESSOR_MAPPING_NAMES = OrderedDict(
46
+ [
47
+ ("align", "AlignProcessor"),
48
+ ("altclip", "AltCLIPProcessor"),
49
+ ("bark", "BarkProcessor"),
50
+ ("blip", "BlipProcessor"),
51
+ ("blip-2", "Blip2Processor"),
52
+ ("bridgetower", "BridgeTowerProcessor"),
53
+ ("chinese_clip", "ChineseCLIPProcessor"),
54
+ ("clap", "ClapProcessor"),
55
+ ("clip", "CLIPProcessor"),
56
+ ("clipseg", "CLIPSegProcessor"),
57
+ ("clvp", "ClvpProcessor"),
58
+ ("flava", "FlavaProcessor"),
59
+ ("fuyu", "FuyuProcessor"),
60
+ ("git", "GitProcessor"),
61
+ ("groupvit", "CLIPProcessor"),
62
+ ("hubert", "Wav2Vec2Processor"),
63
+ ("idefics", "IdeficsProcessor"),
64
+ ("idefics2", "Idefics2Processor"),
65
+ ("instructblip", "InstructBlipProcessor"),
66
+ ("kosmos-2", "Kosmos2Processor"),
67
+ ("layoutlmv2", "LayoutLMv2Processor"),
68
+ ("layoutlmv3", "LayoutLMv3Processor"),
69
+ ("llava", "LlavaProcessor"),
70
+ ("llava_next", "LlavaNextProcessor"),
71
+ ("markuplm", "MarkupLMProcessor"),
72
+ ("mctct", "MCTCTProcessor"),
73
+ ("mgp-str", "MgpstrProcessor"),
74
+ ("oneformer", "OneFormerProcessor"),
75
+ ("owlv2", "Owlv2Processor"),
76
+ ("owlvit", "OwlViTProcessor"),
77
+ ("pix2struct", "Pix2StructProcessor"),
78
+ ("pop2piano", "Pop2PianoProcessor"),
79
+ ("sam", "SamProcessor"),
80
+ ("seamless_m4t", "SeamlessM4TProcessor"),
81
+ ("sew", "Wav2Vec2Processor"),
82
+ ("sew-d", "Wav2Vec2Processor"),
83
+ ("siglip", "SiglipProcessor"),
84
+ ("speech_to_text", "Speech2TextProcessor"),
85
+ ("speech_to_text_2", "Speech2Text2Processor"),
86
+ ("speecht5", "SpeechT5Processor"),
87
+ ("trocr", "TrOCRProcessor"),
88
+ ("tvlt", "TvltProcessor"),
89
+ ("tvp", "TvpProcessor"),
90
+ ("unispeech", "Wav2Vec2Processor"),
91
+ ("unispeech-sat", "Wav2Vec2Processor"),
92
+ ("vilt", "ViltProcessor"),
93
+ ("vipllava", "LlavaProcessor"),
94
+ ("vision-text-dual-encoder", "VisionTextDualEncoderProcessor"),
95
+ ("wav2vec2", "Wav2Vec2Processor"),
96
+ ("wav2vec2-bert", "Wav2Vec2Processor"),
97
+ ("wav2vec2-conformer", "Wav2Vec2Processor"),
98
+ ("wavlm", "Wav2Vec2Processor"),
99
+ ("whisper", "WhisperProcessor"),
100
+ ("xclip", "XCLIPProcessor"),
101
+ ]
102
+ )
103
+
104
+ PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, PROCESSOR_MAPPING_NAMES)
105
+
106
+
107
+ def processor_class_from_name(class_name: str):
108
+ for module_name, processors in PROCESSOR_MAPPING_NAMES.items():
109
+ if class_name in processors:
110
+ module_name = model_type_to_module_name(module_name)
111
+
112
+ module = importlib.import_module(f".{module_name}", "transformers.models")
113
+ try:
114
+ return getattr(module, class_name)
115
+ except AttributeError:
116
+ continue
117
+
118
+ for processor in PROCESSOR_MAPPING._extra_content.values():
119
+ if getattr(processor, "__name__", None) == class_name:
120
+ return processor
121
+
122
+ # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
123
+ # init and we return the proper dummy to get an appropriate error message.
124
+ main_module = importlib.import_module("transformers")
125
+ if hasattr(main_module, class_name):
126
+ return getattr(main_module, class_name)
127
+
128
+ return None
129
+
130
+
131
+ class AutoProcessor:
132
+ r"""
133
+ This is a generic processor class that will be instantiated as one of the processor classes of the library when
134
+ created with the [`AutoProcessor.from_pretrained`] class method.
135
+
136
+ This class cannot be instantiated directly using `__init__()` (throws an error).
137
+ """
138
+
139
+ def __init__(self):
140
+ raise EnvironmentError(
141
+ "AutoProcessor is designed to be instantiated "
142
+ "using the `AutoProcessor.from_pretrained(pretrained_model_name_or_path)` method."
143
+ )
144
+
145
+ @classmethod
146
+ @replace_list_option_in_docstrings(PROCESSOR_MAPPING_NAMES)
147
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
148
+ r"""
149
+ Instantiate one of the processor classes of the library from a pretrained model vocabulary.
150
+
151
+ The processor class to instantiate is selected based on the `model_type` property of the config object (either
152
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible):
153
+
154
+ List options
155
+
156
+ Params:
157
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
158
+ This can be either:
159
+
160
+ - a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
161
+ huggingface.co.
162
+ - a path to a *directory* containing a processor files saved using the `save_pretrained()` method,
163
+ e.g., `./my_model_directory/`.
164
+ cache_dir (`str` or `os.PathLike`, *optional*):
165
+ Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
166
+ standard cache should not be used.
167
+ force_download (`bool`, *optional*, defaults to `False`):
168
+ Whether or not to force (re-)downloading the feature extractor files and override the cached versions
169
+ if they exist.
170
+ resume_download (`bool`, *optional*, defaults to `False`):
171
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
172
+ exists.
173
+ proxies (`Dict[str, str]`, *optional*):
174
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
175
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
176
+ token (`str` or *bool*, *optional*):
177
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
178
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
179
+ revision (`str`, *optional*, defaults to `"main"`):
180
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
181
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
182
+ identifier allowed by git.
183
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
184
+ If `False`, then this function returns just the final feature extractor object. If `True`, then this
185
+ function returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
186
+ consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
187
+ `kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
188
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
189
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
190
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
191
+ execute code present on the Hub on your local machine.
192
+ kwargs (`Dict[str, Any]`, *optional*):
193
+ The values in kwargs of any keys which are feature extractor attributes will be used to override the
194
+ loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
195
+ controlled by the `return_unused_kwargs` keyword parameter.
196
+
197
+ <Tip>
198
+
199
+ Passing `token=True` is required when you want to use a private model.
200
+
201
+ </Tip>
202
+
203
+ Examples:
204
+
205
+ ```python
206
+ >>> from transformers import AutoProcessor
207
+
208
+ >>> # Download processor from huggingface.co and cache.
209
+ >>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
210
+
211
+ >>> # If processor files are in a directory (e.g. processor was saved using *save_pretrained('./test/saved_model/')*)
212
+ >>> # processor = AutoProcessor.from_pretrained("./test/saved_model/")
213
+ ```"""
214
+ use_auth_token = kwargs.pop("use_auth_token", None)
215
+ if use_auth_token is not None:
216
+ warnings.warn(
217
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
218
+ FutureWarning,
219
+ )
220
+ if kwargs.get("token", None) is not None:
221
+ raise ValueError(
222
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
223
+ )
224
+ kwargs["token"] = use_auth_token
225
+
226
+ config = kwargs.pop("config", None)
227
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
228
+ kwargs["_from_auto"] = True
229
+
230
+ processor_class = None
231
+ processor_auto_map = None
232
+
233
+ # First, let's see if we have a processor or preprocessor config.
234
+ # Filter the kwargs for `get_file_from_repo`.
235
+ get_file_from_repo_kwargs = {
236
+ key: kwargs[key] for key in inspect.signature(get_file_from_repo).parameters.keys() if key in kwargs
237
+ }
238
+
239
+ # Let's start by checking whether the processor class is saved in a processor config
240
+ processor_config_file = get_file_from_repo(
241
+ pretrained_model_name_or_path, PROCESSOR_NAME, **get_file_from_repo_kwargs
242
+ )
243
+ if processor_config_file is not None:
244
+ config_dict, _ = ProcessorMixin.get_processor_dict(pretrained_model_name_or_path, **kwargs)
245
+ processor_class = config_dict.get("processor_class", None)
246
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
247
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
248
+
249
+ if processor_class is None:
250
+ # If not found, let's check whether the processor class is saved in an image processor config
251
+ preprocessor_config_file = get_file_from_repo(
252
+ pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME, **get_file_from_repo_kwargs
253
+ )
254
+ if preprocessor_config_file is not None:
255
+ config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
256
+ processor_class = config_dict.get("processor_class", None)
257
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
258
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
259
+
260
+ # If not found, let's check whether the processor class is saved in a feature extractor config
261
+ if preprocessor_config_file is not None and processor_class is None:
262
+ config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(
263
+ pretrained_model_name_or_path, **kwargs
264
+ )
265
+ processor_class = config_dict.get("processor_class", None)
266
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
267
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
268
+
269
+ if processor_class is None:
270
+ # Next, let's check whether the processor class is saved in a tokenizer
271
+ tokenizer_config_file = get_file_from_repo(
272
+ pretrained_model_name_or_path, TOKENIZER_CONFIG_FILE, **get_file_from_repo_kwargs
273
+ )
274
+ if tokenizer_config_file is not None:
275
+ with open(tokenizer_config_file, encoding="utf-8") as reader:
276
+ config_dict = json.load(reader)
277
+
278
+ processor_class = config_dict.get("processor_class", None)
279
+ if "AutoProcessor" in config_dict.get("auto_map", {}):
280
+ processor_auto_map = config_dict["auto_map"]["AutoProcessor"]
281
+
282
+ if processor_class is None:
283
+ # Otherwise, load config, if it can be loaded.
284
+ if not isinstance(config, PretrainedConfig):
285
+ config = AutoConfig.from_pretrained(
286
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
287
+ )
288
+
289
+ # And check if the config contains the processor class.
290
+ processor_class = getattr(config, "processor_class", None)
291
+ if hasattr(config, "auto_map") and "AutoProcessor" in config.auto_map:
292
+ processor_auto_map = config.auto_map["AutoProcessor"]
293
+
294
+ if processor_class is not None:
295
+ processor_class = processor_class_from_name(processor_class)
296
+
297
+ has_remote_code = processor_auto_map is not None
298
+ has_local_code = processor_class is not None or type(config) in PROCESSOR_MAPPING
299
+ trust_remote_code = resolve_trust_remote_code(
300
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
301
+ )
302
+
303
+ if has_remote_code and trust_remote_code:
304
+ processor_class = get_class_from_dynamic_module(
305
+ processor_auto_map, pretrained_model_name_or_path, **kwargs
306
+ )
307
+ _ = kwargs.pop("code_revision", None)
308
+ if os.path.isdir(pretrained_model_name_or_path):
309
+ processor_class.register_for_auto_class()
310
+ return processor_class.from_pretrained(
311
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
312
+ )
313
+ elif processor_class is not None:
314
+ return processor_class.from_pretrained(
315
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
316
+ )
317
+ # Last try: we use the PROCESSOR_MAPPING.
318
+ elif type(config) in PROCESSOR_MAPPING:
319
+ return PROCESSOR_MAPPING[type(config)].from_pretrained(pretrained_model_name_or_path, **kwargs)
320
+
321
+ # At this stage, there doesn't seem to be a `Processor` class available for this model, so let's try a
322
+ # tokenizer.
323
+ try:
324
+ return AutoTokenizer.from_pretrained(
325
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
326
+ )
327
+ except Exception:
328
+ try:
329
+ return AutoImageProcessor.from_pretrained(
330
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
331
+ )
332
+ except Exception:
333
+ pass
334
+
335
+ try:
336
+ return AutoFeatureExtractor.from_pretrained(
337
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
338
+ )
339
+ except Exception:
340
+ pass
341
+
342
+ raise ValueError(
343
+ f"Unrecognized processing class in {pretrained_model_name_or_path}. Can't instantiate a processor, a "
344
+ "tokenizer, an image processor or a feature extractor for this model. Make sure the repository contains "
345
+ "the files of at least one of those processing classes."
346
+ )
347
+
348
+ @staticmethod
349
+ def register(config_class, processor_class, exist_ok=False):
350
+ """
351
+ Register a new processor for this class.
352
+
353
+ Args:
354
+ config_class ([`PretrainedConfig`]):
355
+ The configuration corresponding to the model to register.
356
+ processor_class ([`FeatureExtractorMixin`]): The processor to register.
357
+ """
358
+ PROCESSOR_MAPPING.register(config_class, processor_class, exist_ok=exist_ok)
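The `register` hook above is how downstream code attaches a custom processor to a custom config. A sketch under stated assumptions: `MyConfig` and `MyProcessor` are hypothetical user-defined placeholders, and the config is also registered with `AutoConfig` so that `from_pretrained` can resolve it.

```python
# Hypothetical registration sketch; MyConfig and MyProcessor are placeholder
# user-defined classes, not classes shipped with transformers.
from transformers import AutoConfig, AutoProcessor, PretrainedConfig, ProcessorMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"  # hypothetical model type


class MyProcessor(ProcessorMixin):
    attributes = []  # a real processor would list e.g. ["tokenizer"]

    def __init__(self, **kwargs):
        pass


AutoConfig.register("my-model", MyConfig)
AutoProcessor.register(MyConfig, MyProcessor)
# AutoProcessor.from_pretrained(...) can now resolve MyProcessor for any
# checkpoint whose config reports model_type == "my-model".
```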
llmeval-env/lib/python3.10/site-packages/transformers/models/auto/tokenization_auto.py ADDED
@@ -0,0 +1,936 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Auto Tokenizer class."""
16
+
17
+ import importlib
18
+ import json
19
+ import os
20
+ import warnings
21
+ from collections import OrderedDict
22
+ from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union
23
+
24
+ from ...configuration_utils import PretrainedConfig
25
+ from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
26
+ from ...tokenization_utils import PreTrainedTokenizer
27
+ from ...tokenization_utils_base import TOKENIZER_CONFIG_FILE
28
+ from ...utils import (
29
+ cached_file,
30
+ extract_commit_hash,
31
+ is_g2p_en_available,
32
+ is_sentencepiece_available,
33
+ is_tokenizers_available,
34
+ logging,
35
+ )
36
+ from ..encoder_decoder import EncoderDecoderConfig
37
+ from .auto_factory import _LazyAutoMapping
38
+ from .configuration_auto import (
39
+ CONFIG_MAPPING_NAMES,
40
+ AutoConfig,
41
+ config_class_to_model_type,
42
+ model_type_to_module_name,
43
+ replace_list_option_in_docstrings,
44
+ )
45
+
46
+
47
+ if is_tokenizers_available():
48
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
49
+ else:
50
+ PreTrainedTokenizerFast = None
51
+
52
+
53
+ logger = logging.get_logger(__name__)
54
+
55
+ if TYPE_CHECKING:
56
+ # This significantly improves completion suggestion performance when
57
+ # the transformers package is used with Microsoft's Pylance language server.
58
+ TOKENIZER_MAPPING_NAMES: OrderedDict[str, Tuple[Optional[str], Optional[str]]] = OrderedDict()
59
+ else:
60
+ TOKENIZER_MAPPING_NAMES = OrderedDict(
61
+ [
62
+ (
63
+ "albert",
64
+ (
65
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
66
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
67
+ ),
68
+ ),
69
+ ("align", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
70
+ ("bark", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
71
+ ("bart", ("BartTokenizer", "BartTokenizerFast")),
72
+ (
73
+ "barthez",
74
+ (
75
+ "BarthezTokenizer" if is_sentencepiece_available() else None,
76
+ "BarthezTokenizerFast" if is_tokenizers_available() else None,
77
+ ),
78
+ ),
79
+ ("bartpho", ("BartphoTokenizer", None)),
80
+ ("bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
81
+ ("bert-generation", ("BertGenerationTokenizer" if is_sentencepiece_available() else None, None)),
82
+ ("bert-japanese", ("BertJapaneseTokenizer", None)),
83
+ ("bertweet", ("BertweetTokenizer", None)),
84
+ (
85
+ "big_bird",
86
+ (
87
+ "BigBirdTokenizer" if is_sentencepiece_available() else None,
88
+ "BigBirdTokenizerFast" if is_tokenizers_available() else None,
89
+ ),
90
+ ),
91
+ ("bigbird_pegasus", ("PegasusTokenizer", "PegasusTokenizerFast" if is_tokenizers_available() else None)),
92
+ ("biogpt", ("BioGptTokenizer", None)),
93
+ ("blenderbot", ("BlenderbotTokenizer", "BlenderbotTokenizerFast")),
94
+ ("blenderbot-small", ("BlenderbotSmallTokenizer", None)),
95
+ ("blip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
96
+ ("blip-2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
97
+ ("bloom", (None, "BloomTokenizerFast" if is_tokenizers_available() else None)),
98
+ ("bridgetower", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
99
+ ("bros", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
100
+ ("byt5", ("ByT5Tokenizer", None)),
101
+ (
102
+ "camembert",
103
+ (
104
+ "CamembertTokenizer" if is_sentencepiece_available() else None,
105
+ "CamembertTokenizerFast" if is_tokenizers_available() else None,
106
+ ),
107
+ ),
108
+ ("canine", ("CanineTokenizer", None)),
109
+ ("chinese_clip", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
110
+ (
111
+ "clap",
112
+ (
113
+ "RobertaTokenizer",
114
+ "RobertaTokenizerFast" if is_tokenizers_available() else None,
115
+ ),
116
+ ),
117
+ (
118
+ "clip",
119
+ (
120
+ "CLIPTokenizer",
121
+ "CLIPTokenizerFast" if is_tokenizers_available() else None,
122
+ ),
123
+ ),
124
+ (
125
+ "clipseg",
126
+ (
127
+ "CLIPTokenizer",
128
+ "CLIPTokenizerFast" if is_tokenizers_available() else None,
129
+ ),
130
+ ),
131
+ ("clvp", ("ClvpTokenizer", None)),
132
+ (
133
+ "code_llama",
134
+ (
135
+ "CodeLlamaTokenizer" if is_sentencepiece_available() else None,
136
+ "CodeLlamaTokenizerFast" if is_tokenizers_available() else None,
137
+ ),
138
+ ),
139
+ ("codegen", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)),
140
+ ("cohere", (None, "CohereTokenizerFast" if is_tokenizers_available() else None)),
141
+ ("convbert", ("ConvBertTokenizer", "ConvBertTokenizerFast" if is_tokenizers_available() else None)),
142
+ (
143
+ "cpm",
144
+ (
145
+ "CpmTokenizer" if is_sentencepiece_available() else None,
146
+ "CpmTokenizerFast" if is_tokenizers_available() else None,
147
+ ),
148
+ ),
149
+ ("cpmant", ("CpmAntTokenizer", None)),
150
+ ("ctrl", ("CTRLTokenizer", None)),
151
+ ("data2vec-audio", ("Wav2Vec2CTCTokenizer", None)),
152
+ ("data2vec-text", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
153
+ ("dbrx", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
154
+ ("deberta", ("DebertaTokenizer", "DebertaTokenizerFast" if is_tokenizers_available() else None)),
155
+ (
156
+ "deberta-v2",
157
+ (
158
+ "DebertaV2Tokenizer" if is_sentencepiece_available() else None,
159
+ "DebertaV2TokenizerFast" if is_tokenizers_available() else None,
160
+ ),
161
+ ),
162
+ ("distilbert", ("DistilBertTokenizer", "DistilBertTokenizerFast" if is_tokenizers_available() else None)),
163
+ (
164
+ "dpr",
165
+ (
166
+ "DPRQuestionEncoderTokenizer",
167
+ "DPRQuestionEncoderTokenizerFast" if is_tokenizers_available() else None,
168
+ ),
169
+ ),
170
+ ("electra", ("ElectraTokenizer", "ElectraTokenizerFast" if is_tokenizers_available() else None)),
171
+ ("ernie", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
172
+ ("ernie_m", ("ErnieMTokenizer" if is_sentencepiece_available() else None, None)),
173
+ ("esm", ("EsmTokenizer", None)),
174
+ ("falcon", (None, "PreTrainedTokenizerFast" if is_tokenizers_available() else None)),
175
+ (
176
+ "fastspeech2_conformer",
177
+ ("FastSpeech2ConformerTokenizer" if is_g2p_en_available() else None, None),
178
+ ),
179
+ ("flaubert", ("FlaubertTokenizer", None)),
180
+ ("fnet", ("FNetTokenizer", "FNetTokenizerFast" if is_tokenizers_available() else None)),
181
+ ("fsmt", ("FSMTTokenizer", None)),
182
+ ("funnel", ("FunnelTokenizer", "FunnelTokenizerFast" if is_tokenizers_available() else None)),
183
+ (
184
+ "gemma",
185
+ (
186
+ "GemmaTokenizer" if is_sentencepiece_available() else None,
187
+ "GemmaTokenizerFast" if is_tokenizers_available() else None,
188
+ ),
189
+ ),
190
+ ("git", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
191
+ ("gpt-sw3", ("GPTSw3Tokenizer" if is_sentencepiece_available() else None, None)),
192
+ ("gpt2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
193
+ ("gpt_bigcode", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
194
+ ("gpt_neo", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
195
+ ("gpt_neox", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
196
+ ("gpt_neox_japanese", ("GPTNeoXJapaneseTokenizer", None)),
197
+ ("gptj", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
198
+ ("gptsan-japanese", ("GPTSanJapaneseTokenizer", None)),
199
+ ("grounding-dino", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
200
+ ("groupvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
201
+ ("herbert", ("HerbertTokenizer", "HerbertTokenizerFast" if is_tokenizers_available() else None)),
202
+ ("hubert", ("Wav2Vec2CTCTokenizer", None)),
203
+ ("ibert", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
204
+ ("idefics", (None, "LlamaTokenizerFast" if is_tokenizers_available() else None)),
205
+ ("idefics2", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
206
+ ("instructblip", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
207
+ (
208
+ "jamba",
209
+ (
210
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
211
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
212
+ ),
213
+ ),
214
+ ("jukebox", ("JukeboxTokenizer", None)),
215
+ (
216
+ "kosmos-2",
217
+ (
218
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
219
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
220
+ ),
221
+ ),
222
+ ("layoutlm", ("LayoutLMTokenizer", "LayoutLMTokenizerFast" if is_tokenizers_available() else None)),
223
+ ("layoutlmv2", ("LayoutLMv2Tokenizer", "LayoutLMv2TokenizerFast" if is_tokenizers_available() else None)),
224
+ ("layoutlmv3", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
225
+ ("layoutxlm", ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast" if is_tokenizers_available() else None)),
226
+ ("led", ("LEDTokenizer", "LEDTokenizerFast" if is_tokenizers_available() else None)),
227
+ ("lilt", ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast" if is_tokenizers_available() else None)),
228
+ (
229
+ "llama",
230
+ (
231
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
232
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
233
+ ),
234
+ ),
235
+ ("llava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
236
+ ("llava_next", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
237
+ ("longformer", ("LongformerTokenizer", "LongformerTokenizerFast" if is_tokenizers_available() else None)),
238
+ (
239
+ "longt5",
240
+ (
241
+ "T5Tokenizer" if is_sentencepiece_available() else None,
242
+ "T5TokenizerFast" if is_tokenizers_available() else None,
243
+ ),
244
+ ),
245
+ ("luke", ("LukeTokenizer", None)),
246
+ ("lxmert", ("LxmertTokenizer", "LxmertTokenizerFast" if is_tokenizers_available() else None)),
247
+ ("m2m_100", ("M2M100Tokenizer" if is_sentencepiece_available() else None, None)),
248
+ ("mamba", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
249
+ ("marian", ("MarianTokenizer" if is_sentencepiece_available() else None, None)),
250
+ (
251
+ "mbart",
252
+ (
253
+ "MBartTokenizer" if is_sentencepiece_available() else None,
254
+ "MBartTokenizerFast" if is_tokenizers_available() else None,
255
+ ),
256
+ ),
257
+ (
258
+ "mbart50",
259
+ (
260
+ "MBart50Tokenizer" if is_sentencepiece_available() else None,
261
+ "MBart50TokenizerFast" if is_tokenizers_available() else None,
262
+ ),
263
+ ),
264
+ ("mega", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
265
+ ("megatron-bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
266
+ ("mgp-str", ("MgpstrTokenizer", None)),
267
+ (
268
+ "mistral",
269
+ (
270
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
271
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
272
+ ),
273
+ ),
274
+ (
275
+ "mixtral",
276
+ (
277
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
278
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
279
+ ),
280
+ ),
281
+ ("mluke", ("MLukeTokenizer" if is_sentencepiece_available() else None, None)),
282
+ ("mobilebert", ("MobileBertTokenizer", "MobileBertTokenizerFast" if is_tokenizers_available() else None)),
283
+ ("mpnet", ("MPNetTokenizer", "MPNetTokenizerFast" if is_tokenizers_available() else None)),
284
+ ("mpt", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
285
+ ("mra", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
286
+ (
287
+ "mt5",
288
+ (
289
+ "MT5Tokenizer" if is_sentencepiece_available() else None,
290
+ "MT5TokenizerFast" if is_tokenizers_available() else None,
291
+ ),
292
+ ),
293
+ ("musicgen", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
294
+ ("musicgen_melody", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
295
+ ("mvp", ("MvpTokenizer", "MvpTokenizerFast" if is_tokenizers_available() else None)),
296
+ ("nezha", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
297
+ (
298
+ "nllb",
299
+ (
300
+ "NllbTokenizer" if is_sentencepiece_available() else None,
301
+ "NllbTokenizerFast" if is_tokenizers_available() else None,
302
+ ),
303
+ ),
304
+ (
305
+ "nllb-moe",
306
+ (
307
+ "NllbTokenizer" if is_sentencepiece_available() else None,
308
+ "NllbTokenizerFast" if is_tokenizers_available() else None,
309
+ ),
310
+ ),
311
+ (
312
+ "nystromformer",
313
+ (
314
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
315
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
316
+ ),
317
+ ),
318
+ ("olmo", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
319
+ ("oneformer", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
320
+ (
321
+ "openai-gpt",
322
+ ("OpenAIGPTTokenizer", "OpenAIGPTTokenizerFast" if is_tokenizers_available() else None),
323
+ ),
324
+ ("opt", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
325
+ ("owlv2", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
326
+ ("owlvit", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
327
+ (
328
+ "pegasus",
329
+ (
330
+ "PegasusTokenizer" if is_sentencepiece_available() else None,
331
+ "PegasusTokenizerFast" if is_tokenizers_available() else None,
332
+ ),
333
+ ),
334
+ (
335
+ "pegasus_x",
336
+ (
337
+ "PegasusTokenizer" if is_sentencepiece_available() else None,
338
+ "PegasusTokenizerFast" if is_tokenizers_available() else None,
339
+ ),
340
+ ),
341
+ (
342
+ "perceiver",
343
+ (
344
+ "PerceiverTokenizer",
345
+ None,
346
+ ),
347
+ ),
348
+ (
349
+ "persimmon",
350
+ (
351
+ "LlamaTokenizer" if is_sentencepiece_available() else None,
352
+ "LlamaTokenizerFast" if is_tokenizers_available() else None,
353
+ ),
354
+ ),
355
+ ("phi", ("CodeGenTokenizer", "CodeGenTokenizerFast" if is_tokenizers_available() else None)),
356
+ ("phobert", ("PhobertTokenizer", None)),
357
+ ("pix2struct", ("T5Tokenizer", "T5TokenizerFast" if is_tokenizers_available() else None)),
358
+ ("plbart", ("PLBartTokenizer" if is_sentencepiece_available() else None, None)),
359
+ ("prophetnet", ("ProphetNetTokenizer", None)),
360
+ ("qdqbert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
361
+ (
362
+ "qwen2",
363
+ (
364
+ "Qwen2Tokenizer",
365
+ "Qwen2TokenizerFast" if is_tokenizers_available() else None,
366
+ ),
367
+ ),
368
+ (
369
+ "qwen2_moe",
370
+ (
371
+ "Qwen2Tokenizer",
372
+ "Qwen2TokenizerFast" if is_tokenizers_available() else None,
373
+ ),
374
+ ),
375
+ ("rag", ("RagTokenizer", None)),
376
+ ("realm", ("RealmTokenizer", "RealmTokenizerFast" if is_tokenizers_available() else None)),
377
+ (
378
+ "recurrent_gemma",
379
+ (
380
+ "GemmaTokenizer" if is_sentencepiece_available() else None,
381
+ "GemmaTokenizerFast" if is_tokenizers_available() else None,
382
+ ),
383
+ ),
384
+ (
385
+ "reformer",
386
+ (
387
+ "ReformerTokenizer" if is_sentencepiece_available() else None,
388
+ "ReformerTokenizerFast" if is_tokenizers_available() else None,
389
+ ),
390
+ ),
391
+ (
392
+ "rembert",
393
+ (
394
+ "RemBertTokenizer" if is_sentencepiece_available() else None,
395
+ "RemBertTokenizerFast" if is_tokenizers_available() else None,
396
+ ),
397
+ ),
398
+ ("retribert", ("RetriBertTokenizer", "RetriBertTokenizerFast" if is_tokenizers_available() else None)),
399
+ ("roberta", ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None)),
400
+ (
401
+ "roberta-prelayernorm",
402
+ ("RobertaTokenizer", "RobertaTokenizerFast" if is_tokenizers_available() else None),
403
+ ),
404
+ ("roc_bert", ("RoCBertTokenizer", None)),
405
+ ("roformer", ("RoFormerTokenizer", "RoFormerTokenizerFast" if is_tokenizers_available() else None)),
406
+ ("rwkv", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
407
+ (
408
+ "seamless_m4t",
409
+ (
410
+ "SeamlessM4TTokenizer" if is_sentencepiece_available() else None,
411
+ "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None,
412
+ ),
413
+ ),
414
+ (
415
+ "seamless_m4t_v2",
416
+ (
417
+ "SeamlessM4TTokenizer" if is_sentencepiece_available() else None,
418
+ "SeamlessM4TTokenizerFast" if is_tokenizers_available() else None,
419
+ ),
420
+ ),
421
+ ("siglip", ("SiglipTokenizer" if is_sentencepiece_available() else None, None)),
422
+ ("speech_to_text", ("Speech2TextTokenizer" if is_sentencepiece_available() else None, None)),
423
+ ("speech_to_text_2", ("Speech2Text2Tokenizer", None)),
424
+ ("speecht5", ("SpeechT5Tokenizer" if is_sentencepiece_available() else None, None)),
425
+ ("splinter", ("SplinterTokenizer", "SplinterTokenizerFast")),
426
+ (
427
+ "squeezebert",
428
+ ("SqueezeBertTokenizer", "SqueezeBertTokenizerFast" if is_tokenizers_available() else None),
429
+ ),
430
+ ("stablelm", (None, "GPTNeoXTokenizerFast" if is_tokenizers_available() else None)),
431
+ ("starcoder2", ("GPT2Tokenizer", "GPT2TokenizerFast" if is_tokenizers_available() else None)),
432
+ (
433
+ "switch_transformers",
434
+ (
435
+ "T5Tokenizer" if is_sentencepiece_available() else None,
436
+ "T5TokenizerFast" if is_tokenizers_available() else None,
437
+ ),
438
+ ),
439
+ (
440
+ "t5",
441
+ (
442
+ "T5Tokenizer" if is_sentencepiece_available() else None,
443
+ "T5TokenizerFast" if is_tokenizers_available() else None,
444
+ ),
445
+ ),
446
+ ("tapas", ("TapasTokenizer", None)),
447
+ ("tapex", ("TapexTokenizer", None)),
448
+ ("transfo-xl", ("TransfoXLTokenizer", None)),
449
+ ("tvp", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
450
+ (
451
+ "udop",
452
+ (
453
+ "UdopTokenizer" if is_sentencepiece_available() else None,
454
+ "UdopTokenizerFast" if is_tokenizers_available() else None,
455
+ ),
456
+ ),
457
+ (
458
+ "umt5",
459
+ (
460
+ "T5Tokenizer" if is_sentencepiece_available() else None,
461
+ "T5TokenizerFast" if is_tokenizers_available() else None,
462
+ ),
463
+ ),
464
+ ("vilt", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
465
+ ("vipllava", ("LlamaTokenizer", "LlamaTokenizerFast" if is_tokenizers_available() else None)),
466
+ ("visual_bert", ("BertTokenizer", "BertTokenizerFast" if is_tokenizers_available() else None)),
467
+ ("vits", ("VitsTokenizer", None)),
468
+ ("wav2vec2", ("Wav2Vec2CTCTokenizer", None)),
469
+ ("wav2vec2-bert", ("Wav2Vec2CTCTokenizer", None)),
470
+ ("wav2vec2-conformer", ("Wav2Vec2CTCTokenizer", None)),
471
+ ("wav2vec2_phoneme", ("Wav2Vec2PhonemeCTCTokenizer", None)),
472
+ ("whisper", ("WhisperTokenizer", "WhisperTokenizerFast" if is_tokenizers_available() else None)),
473
+ ("xclip", ("CLIPTokenizer", "CLIPTokenizerFast" if is_tokenizers_available() else None)),
474
+ (
475
+ "xglm",
476
+ (
477
+ "XGLMTokenizer" if is_sentencepiece_available() else None,
478
+ "XGLMTokenizerFast" if is_tokenizers_available() else None,
479
+ ),
480
+ ),
481
+ ("xlm", ("XLMTokenizer", None)),
482
+ ("xlm-prophetnet", ("XLMProphetNetTokenizer" if is_sentencepiece_available() else None, None)),
483
+ (
484
+ "xlm-roberta",
485
+ (
486
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
487
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
488
+ ),
489
+ ),
490
+ (
491
+ "xlm-roberta-xl",
492
+ (
493
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
494
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
495
+ ),
496
+ ),
497
+ (
498
+ "xlnet",
499
+ (
500
+ "XLNetTokenizer" if is_sentencepiece_available() else None,
501
+ "XLNetTokenizerFast" if is_tokenizers_available() else None,
502
+ ),
503
+ ),
504
+ (
505
+ "xmod",
506
+ (
507
+ "XLMRobertaTokenizer" if is_sentencepiece_available() else None,
508
+ "XLMRobertaTokenizerFast" if is_tokenizers_available() else None,
509
+ ),
510
+ ),
511
+ (
512
+ "yoso",
513
+ (
514
+ "AlbertTokenizer" if is_sentencepiece_available() else None,
515
+ "AlbertTokenizerFast" if is_tokenizers_available() else None,
516
+ ),
517
+ ),
518
+ ]
519
+ )
520
+
521
+ TOKENIZER_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, TOKENIZER_MAPPING_NAMES)
522
+
523
+ CONFIG_TO_TYPE = {v: k for k, v in CONFIG_MAPPING_NAMES.items()}
524
+
525
+
526
+ def tokenizer_class_from_name(class_name: str):
527
+ if class_name == "PreTrainedTokenizerFast":
528
+ return PreTrainedTokenizerFast
529
+
530
+ for module_name, tokenizers in TOKENIZER_MAPPING_NAMES.items():
531
+ if class_name in tokenizers:
532
+ module_name = model_type_to_module_name(module_name)
533
+
534
+ module = importlib.import_module(f".{module_name}", "transformers.models")
535
+ try:
536
+ return getattr(module, class_name)
537
+ except AttributeError:
538
+ continue
539
+
540
+ for config, tokenizers in TOKENIZER_MAPPING._extra_content.items():
541
+ for tokenizer in tokenizers:
542
+ if getattr(tokenizer, "__name__", None) == class_name:
543
+ return tokenizer
544
+
545
+ # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
546
+ # init and we return the proper dummy to get an appropriate error message.
547
+ main_module = importlib.import_module("transformers")
548
+ if hasattr(main_module, class_name):
549
+ return getattr(main_module, class_name)
550
+
551
+ return None
552
+
553
+
554
+ def get_tokenizer_config(
555
+ pretrained_model_name_or_path: Union[str, os.PathLike],
556
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
557
+ force_download: bool = False,
558
+ resume_download: bool = False,
559
+ proxies: Optional[Dict[str, str]] = None,
560
+ token: Optional[Union[bool, str]] = None,
561
+ revision: Optional[str] = None,
562
+ local_files_only: bool = False,
563
+ subfolder: str = "",
564
+ **kwargs,
565
+ ):
566
+ """
567
+ Loads the tokenizer configuration from a pretrained model tokenizer configuration.
568
+
569
+ Args:
570
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
571
+ This can be either:
572
+
573
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
574
+ huggingface.co.
575
+ - a path to a *directory* containing a configuration file saved using the
576
+ [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
577
+
578
+ cache_dir (`str` or `os.PathLike`, *optional*):
579
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the standard
580
+ cache should not be used.
581
+ force_download (`bool`, *optional*, defaults to `False`):
582
+ Whether or not to force the (re-)download of the configuration files and override the cached versions if they
583
+ exist.
584
+ resume_download (`bool`, *optional*, defaults to `False`):
585
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file exists.
586
+ proxies (`Dict[str, str]`, *optional*):
587
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
588
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
589
+ token (`str` or *bool*, *optional*):
590
+ The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
591
+ when running `huggingface-cli login` (stored in `~/.huggingface`).
592
+ revision (`str`, *optional*, defaults to `"main"`):
593
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
594
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
595
+ identifier allowed by git.
596
+ local_files_only (`bool`, *optional*, defaults to `False`):
597
+ If `True`, will only try to load the tokenizer configuration from local files.
598
+ subfolder (`str`, *optional*, defaults to `""`):
599
+ In case the tokenizer config is located inside a subfolder of the model repo on huggingface.co, you can
600
+ specify the folder name here.
601
+
602
+ <Tip>
603
+
604
+ Passing `token=True` is required when you want to use a private model.
605
+
606
+ </Tip>
607
+
608
+ Returns:
609
+ `Dict`: The configuration of the tokenizer.
610
+
611
+ Examples:
612
+
613
+ ```python
614
+ # Download configuration from huggingface.co and cache.
615
+ tokenizer_config = get_tokenizer_config("google-bert/bert-base-uncased")
616
+ # This model does not have a tokenizer config so the result will be an empty dict.
617
+ tokenizer_config = get_tokenizer_config("FacebookAI/xlm-roberta-base")
618
+
619
+ # Save a pretrained tokenizer locally and you can reload its config
620
+ from transformers import AutoTokenizer
621
+
622
+ tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
623
+ tokenizer.save_pretrained("tokenizer-test")
624
+ tokenizer_config = get_tokenizer_config("tokenizer-test")
625
+ ```"""
626
+ use_auth_token = kwargs.pop("use_auth_token", None)
627
+ if use_auth_token is not None:
628
+ warnings.warn(
629
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
630
+ FutureWarning,
631
+ )
632
+ if token is not None:
633
+ raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
634
+ token = use_auth_token
635
+
636
+ commit_hash = kwargs.get("_commit_hash", None)
637
+ resolved_config_file = cached_file(
638
+ pretrained_model_name_or_path,
639
+ TOKENIZER_CONFIG_FILE,
640
+ cache_dir=cache_dir,
641
+ force_download=force_download,
642
+ resume_download=resume_download,
643
+ proxies=proxies,
644
+ token=token,
645
+ revision=revision,
646
+ local_files_only=local_files_only,
647
+ subfolder=subfolder,
648
+ _raise_exceptions_for_gated_repo=False,
649
+ _raise_exceptions_for_missing_entries=False,
650
+ _raise_exceptions_for_connection_errors=False,
651
+ _commit_hash=commit_hash,
652
+ )
653
+ if resolved_config_file is None:
654
+ logger.info("Could not locate the tokenizer configuration file, will try to use the model config instead.")
655
+ return {}
656
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
657
+
658
+ with open(resolved_config_file, encoding="utf-8") as reader:
659
+ result = json.load(reader)
660
+ result["_commit_hash"] = commit_hash
661
+ return result
662
+
663
+
664
+ class AutoTokenizer:
665
+ r"""
666
+ This is a generic tokenizer class that will be instantiated as one of the tokenizer classes of the library when
667
+ created with the [`AutoTokenizer.from_pretrained`] class method.
668
+
669
+ This class cannot be instantiated directly using `__init__()` (throws an error).
670
+ """
671
+
672
+ def __init__(self):
673
+ raise EnvironmentError(
674
+ "AutoTokenizer is designed to be instantiated "
675
+ "using the `AutoTokenizer.from_pretrained(pretrained_model_name_or_path)` method."
676
+ )
677
+
678
+ @classmethod
679
+ @replace_list_option_in_docstrings(TOKENIZER_MAPPING_NAMES)
680
+ def from_pretrained(cls, pretrained_model_name_or_path, *inputs, **kwargs):
681
+ r"""
682
+ Instantiate one of the tokenizer classes of the library from a pretrained model vocabulary.
683
+
684
+ The tokenizer class to instantiate is selected based on the `model_type` property of the config object (either
685
+ passed as an argument or loaded from `pretrained_model_name_or_path` if possible), or when it's missing, by
686
+ falling back to using pattern matching on `pretrained_model_name_or_path`:
687
+
688
+ List options
689
+
690
+ Params:
691
+ pretrained_model_name_or_path (`str` or `os.PathLike`):
692
+ Can be either:
693
+
694
+ - A string, the *model id* of a predefined tokenizer hosted inside a model repo on huggingface.co.
695
+ - A path to a *directory* containing vocabulary files required by the tokenizer, for instance saved
696
+ using the [`~PreTrainedTokenizer.save_pretrained`] method, e.g., `./my_model_directory/`.
697
+ - A path or url to a single saved vocabulary file if and only if the tokenizer only requires a
698
+ single vocabulary file (like Bert or XLNet), e.g.: `./my_model_directory/vocab.txt`. (Not
699
+ applicable to all derived classes)
700
+ inputs (additional positional arguments, *optional*):
701
+ Will be passed along to the Tokenizer `__init__()` method.
702
+ config ([`PretrainedConfig`], *optional*)
703
+ The configuration object used to determine the tokenizer class to instantiate.
704
+ cache_dir (`str` or `os.PathLike`, *optional*):
705
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
706
+ standard cache should not be used.
707
+ force_download (`bool`, *optional*, defaults to `False`):
708
+ Whether or not to force the (re-)download of the model weights and configuration files and override the
709
+ cached versions if they exist.
710
+ resume_download (`bool`, *optional*, defaults to `False`):
711
+ Whether or not to delete incompletely received files. Will attempt to resume the download if such a
712
+ file exists.
713
+ proxies (`Dict[str, str]`, *optional*):
714
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
715
+ 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
716
+ revision (`str`, *optional*, defaults to `"main"`):
717
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
718
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
719
+ identifier allowed by git.
720
+ subfolder (`str`, *optional*):
721
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co (e.g. for
722
+ facebook/rag-token-base), specify it here.
723
+ use_fast (`bool`, *optional*, defaults to `True`):
724
+ Use a [fast Rust-based tokenizer](https://huggingface.co/docs/tokenizers/index) if it is supported for
725
+ a given model. If a fast tokenizer is not available for a given model, a normal Python-based tokenizer
726
+ is returned instead.
727
+ tokenizer_type (`str`, *optional*):
728
+ Tokenizer type to be loaded.
729
+ trust_remote_code (`bool`, *optional*, defaults to `False`):
730
+ Whether or not to allow for custom models defined on the Hub in their own modeling files. This option
731
+ should only be set to `True` for repositories you trust and in which you have read the code, as it will
732
+ execute code present on the Hub on your local machine.
733
+ kwargs (additional keyword arguments, *optional*):
734
+ Will be passed to the Tokenizer `__init__()` method. Can be used to set special tokens like
735
+ `bos_token`, `eos_token`, `unk_token`, `sep_token`, `pad_token`, `cls_token`, `mask_token`,
736
+ `additional_special_tokens`. See parameters in the `__init__()` for more details.
737
+
738
+ Examples:
739
+
740
+ ```python
741
+ >>> from transformers import AutoTokenizer
742
+
743
+ >>> # Download vocabulary from huggingface.co and cache.
744
+ >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
745
+
746
+ >>> # Download vocabulary from huggingface.co (user-uploaded) and cache.
747
+ >>> tokenizer = AutoTokenizer.from_pretrained("dbmdz/bert-base-german-cased")
748
+
749
+ >>> # If vocabulary files are in a directory (e.g. tokenizer was saved using *save_pretrained('./test/saved_model/')*)
750
+ >>> # tokenizer = AutoTokenizer.from_pretrained("./test/bert_saved_model/")
751
+
752
+ >>> # Download vocabulary from huggingface.co and define model-specific arguments
753
+ >>> tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base", add_prefix_space=True)
754
+ ```"""
755
+ use_auth_token = kwargs.pop("use_auth_token", None)
756
+ if use_auth_token is not None:
757
+ warnings.warn(
758
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
759
+ FutureWarning,
760
+ )
761
+ if kwargs.get("token", None) is not None:
762
+ raise ValueError(
763
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
764
+ )
765
+ kwargs["token"] = use_auth_token
766
+
767
+ config = kwargs.pop("config", None)
768
+ kwargs["_from_auto"] = True
769
+
770
+ use_fast = kwargs.pop("use_fast", True)
771
+ tokenizer_type = kwargs.pop("tokenizer_type", None)
772
+ trust_remote_code = kwargs.pop("trust_remote_code", None)
773
+
774
+ # First, let's see whether the tokenizer_type is passed so that we can leverage it
775
+ if tokenizer_type is not None:
776
+ tokenizer_class = None
777
+ tokenizer_class_tuple = TOKENIZER_MAPPING_NAMES.get(tokenizer_type, None)
778
+
779
+ if tokenizer_class_tuple is None:
780
+ raise ValueError(
781
+ f"Passed `tokenizer_type` {tokenizer_type} does not exist. `tokenizer_type` should be one of "
782
+ f"{', '.join(c for c in TOKENIZER_MAPPING_NAMES.keys())}."
783
+ )
784
+
785
+ tokenizer_class_name, tokenizer_fast_class_name = tokenizer_class_tuple
786
+
787
+ if use_fast:
788
+ if tokenizer_fast_class_name is not None:
789
+ tokenizer_class = tokenizer_class_from_name(tokenizer_fast_class_name)
790
+ else:
791
+ logger.warning(
792
+ "`use_fast` is set to `True` but the tokenizer class does not have a fast version. "
793
+ " Falling back to the slow version."
794
+ )
795
+ if tokenizer_class is None:
796
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_name)
797
+
798
+ if tokenizer_class is None:
799
+ raise ValueError(f"Tokenizer class {tokenizer_class_name} is not currently imported.")
800
+
801
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
802
+
803
+ # Next, let's try to use the tokenizer_config file to get the tokenizer class.
804
+ tokenizer_config = get_tokenizer_config(pretrained_model_name_or_path, **kwargs)
805
+ if "_commit_hash" in tokenizer_config:
806
+ kwargs["_commit_hash"] = tokenizer_config["_commit_hash"]
807
+ config_tokenizer_class = tokenizer_config.get("tokenizer_class")
808
+ tokenizer_auto_map = None
809
+ if "auto_map" in tokenizer_config:
810
+ if isinstance(tokenizer_config["auto_map"], (tuple, list)):
811
+ # Legacy format for dynamic tokenizers
812
+ tokenizer_auto_map = tokenizer_config["auto_map"]
813
+ else:
814
+ tokenizer_auto_map = tokenizer_config["auto_map"].get("AutoTokenizer", None)
815
+
816
+ # If that did not work, let's try to use the config.
817
+ if config_tokenizer_class is None:
818
+ if not isinstance(config, PretrainedConfig):
819
+ config = AutoConfig.from_pretrained(
820
+ pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
821
+ )
822
+ config_tokenizer_class = config.tokenizer_class
823
+ if hasattr(config, "auto_map") and "AutoTokenizer" in config.auto_map:
824
+ tokenizer_auto_map = config.auto_map["AutoTokenizer"]
825
+
826
+ has_remote_code = tokenizer_auto_map is not None
827
+ has_local_code = type(config) in TOKENIZER_MAPPING or (
828
+ config_tokenizer_class is not None
829
+ and (
830
+ tokenizer_class_from_name(config_tokenizer_class) is not None
831
+ or tokenizer_class_from_name(config_tokenizer_class + "Fast") is not None
832
+ )
833
+ )
834
+ trust_remote_code = resolve_trust_remote_code(
835
+ trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
836
+ )
837
+
838
+ if has_remote_code and trust_remote_code:
839
+ if use_fast and tokenizer_auto_map[1] is not None:
840
+ class_ref = tokenizer_auto_map[1]
841
+ else:
842
+ class_ref = tokenizer_auto_map[0]
843
+ tokenizer_class = get_class_from_dynamic_module(class_ref, pretrained_model_name_or_path, **kwargs)
844
+ _ = kwargs.pop("code_revision", None)
845
+ if os.path.isdir(pretrained_model_name_or_path):
846
+ tokenizer_class.register_for_auto_class()
847
+ return tokenizer_class.from_pretrained(
848
+ pretrained_model_name_or_path, *inputs, trust_remote_code=trust_remote_code, **kwargs
849
+ )
850
+ elif config_tokenizer_class is not None:
851
+ tokenizer_class = None
852
+ if use_fast and not config_tokenizer_class.endswith("Fast"):
853
+ tokenizer_class_candidate = f"{config_tokenizer_class}Fast"
854
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
855
+ if tokenizer_class is None:
856
+ tokenizer_class_candidate = config_tokenizer_class
857
+ tokenizer_class = tokenizer_class_from_name(tokenizer_class_candidate)
858
+ if tokenizer_class is None:
859
+ raise ValueError(
860
+ f"Tokenizer class {tokenizer_class_candidate} does not exist or is not currently imported."
861
+ )
862
+ return tokenizer_class.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
863
+
864
+ # Otherwise we have to be creative.
865
+ # if model is an encoder decoder, the encoder tokenizer class is used by default
866
+ if isinstance(config, EncoderDecoderConfig):
867
+ if type(config.decoder) is not type(config.encoder): # noqa: E721
868
+ logger.warning(
869
+ f"The encoder model config class: {config.encoder.__class__} is different from the decoder model "
870
+ f"config class: {config.decoder.__class__}. It is not recommended to use the "
871
+ "`AutoTokenizer.from_pretrained()` method in this case. Please use the encoder and decoder "
872
+ "specific tokenizer classes."
873
+ )
874
+ config = config.encoder
875
+
876
+ model_type = config_class_to_model_type(type(config).__name__)
877
+ if model_type is not None:
878
+ tokenizer_class_py, tokenizer_class_fast = TOKENIZER_MAPPING[type(config)]
879
+ if tokenizer_class_fast and (use_fast or tokenizer_class_py is None):
880
+ return tokenizer_class_fast.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
881
+ else:
882
+ if tokenizer_class_py is not None:
883
+ return tokenizer_class_py.from_pretrained(pretrained_model_name_or_path, *inputs, **kwargs)
884
+ else:
885
+ raise ValueError(
886
+ "This tokenizer cannot be instantiated. Please make sure you have `sentencepiece` installed "
887
+ "in order to use this tokenizer."
888
+ )
889
+
890
+ raise ValueError(
891
+ f"Unrecognized configuration class {config.__class__} to build an AutoTokenizer.\n"
892
+ f"Model type should be one of {', '.join(c.__name__ for c in TOKENIZER_MAPPING.keys())}."
893
+ )
894
+
895
+ def register(config_class, slow_tokenizer_class=None, fast_tokenizer_class=None, exist_ok=False):
896
+ """
897
+ Register a new tokenizer in this mapping.
898
+
899
+
900
+ Args:
901
+ config_class ([`PretrainedConfig`]):
902
+ The configuration corresponding to the model to register.
903
+ slow_tokenizer_class ([`PreTrainedTokenizer`], *optional*):
904
+ The slow tokenizer to register.
905
+ fast_tokenizer_class ([`PreTrainedTokenizerFast`], *optional*):
906
+ The fast tokenizer to register.
907
+ """
908
+ if slow_tokenizer_class is None and fast_tokenizer_class is None:
909
+ raise ValueError("You need to pass either a `slow_tokenizer_class` or a `fast_tokenizer_class")
910
+ if slow_tokenizer_class is not None and issubclass(slow_tokenizer_class, PreTrainedTokenizerFast):
911
+ raise ValueError("You passed a fast tokenizer in the `slow_tokenizer_class`.")
912
+ if fast_tokenizer_class is not None and issubclass(fast_tokenizer_class, PreTrainedTokenizer):
913
+ raise ValueError("You passed a slow tokenizer in the `fast_tokenizer_class`.")
914
+
915
+ if (
916
+ slow_tokenizer_class is not None
917
+ and fast_tokenizer_class is not None
918
+ and issubclass(fast_tokenizer_class, PreTrainedTokenizerFast)
919
+ and fast_tokenizer_class.slow_tokenizer_class != slow_tokenizer_class
920
+ ):
921
+ raise ValueError(
922
+ "The fast tokenizer class you are passing has a `slow_tokenizer_class` attribute that is not "
923
+ "consistent with the slow tokenizer class you passed (fast tokenizer has "
924
+ f"{fast_tokenizer_class.slow_tokenizer_class} and you passed {slow_tokenizer_class}. Fix one of those "
925
+ "so they match!"
926
+ )
927
+
928
+ # Avoid resetting a set slow/fast tokenizer if we are passing just the other ones.
929
+ if config_class in TOKENIZER_MAPPING._extra_content:
930
+ existing_slow, existing_fast = TOKENIZER_MAPPING[config_class]
931
+ if slow_tokenizer_class is None:
932
+ slow_tokenizer_class = existing_slow
933
+ if fast_tokenizer_class is None:
934
+ fast_tokenizer_class = existing_fast
935
+
936
+ TOKENIZER_MAPPING.register(config_class, (slow_tokenizer_class, fast_tokenizer_class), exist_ok=exist_ok)
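As a quick usage sketch for the `AutoTokenizer` entry points defined above (network access to the Hub is assumed; the checkpoint is the same one used in the docstring examples):

```python
from transformers import AutoTokenizer

# Resolve the tokenizer class from tokenizer_config.json (or, failing that, the model config).
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Force the slow, pure-Python implementation instead of the Rust-backed "fast" one.
slow_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased", use_fast=False)

# Bypass config resolution by naming a key from TOKENIZER_MAPPING_NAMES directly.
typed_tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased", tokenizer_type="bert")

print(tokenizer("Hello world")["input_ids"])
```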
llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/__init__.py ADDED
@@ -0,0 +1,59 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available
18
+
19
+
20
+ _import_structure = {}
21
+
22
+ try:
23
+ if not is_sentencepiece_available():
24
+ raise OptionalDependencyNotAvailable()
25
+ except OptionalDependencyNotAvailable:
26
+ pass
27
+ else:
28
+ _import_structure["tokenization_barthez"] = ["BarthezTokenizer"]
29
+
30
+ try:
31
+ if not is_tokenizers_available():
32
+ raise OptionalDependencyNotAvailable()
33
+ except OptionalDependencyNotAvailable:
34
+ pass
35
+ else:
36
+ _import_structure["tokenization_barthez_fast"] = ["BarthezTokenizerFast"]
37
+
38
+
39
+ if TYPE_CHECKING:
40
+ try:
41
+ if not is_sentencepiece_available():
42
+ raise OptionalDependencyNotAvailable()
43
+ except OptionalDependencyNotAvailable:
44
+ pass
45
+ else:
46
+ from .tokenization_barthez import BarthezTokenizer
47
+
48
+ try:
49
+ if not is_tokenizers_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .tokenization_barthez_fast import BarthezTokenizerFast
55
+
56
+ else:
57
+ import sys
58
+
59
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
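The `_LazyModule` indirection above defers the heavy tokenizer imports until a name is first accessed. A small sketch of the user-visible behaviour, assuming the `sentencepiece`/`tokenizers` extras are installed:

```python
# The attribute access is what actually imports tokenization_barthez_fast.
from transformers.models.barthez import BarthezTokenizerFast

# Class attribute defined in tokenization_barthez_fast.py (next file below).
print(BarthezTokenizerFast.vocab_files_names)
# {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
```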
llmeval-env/lib/python3.10/site-packages/transformers/models/barthez/tokenization_barthez_fast.py ADDED
@@ -0,0 +1,195 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 Ecole Polytechnique and the HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License
15
+ """ Tokenization classes for the BARThez model."""
16
+
17
+
18
+ import os
19
+ from shutil import copyfile
20
+ from typing import List, Optional, Tuple
21
+
22
+ from ...tokenization_utils import AddedToken
23
+ from ...tokenization_utils_fast import PreTrainedTokenizerFast
24
+ from ...utils import is_sentencepiece_available, logging
25
+
26
+
27
+ if is_sentencepiece_available():
28
+ from .tokenization_barthez import BarthezTokenizer
29
+ else:
30
+ BarthezTokenizer = None
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
35
+
36
+
37
+ SPIECE_UNDERLINE = "▁"
38
+
39
+
40
+ class BarthezTokenizerFast(PreTrainedTokenizerFast):
41
+ """
42
+ Adapted from [`CamembertTokenizer`] and [`BartTokenizer`]. Construct a "fast" BARThez tokenizer. Based on
43
+ [SentencePiece](https://github.com/google/sentencepiece).
44
+
45
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
46
+ refer to this superclass for more information regarding those methods.
47
+
48
+ Args:
49
+ vocab_file (`str`):
50
+ [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that
51
+ contains the vocabulary necessary to instantiate a tokenizer.
52
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
53
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
54
+
55
+ <Tip>
56
+
57
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
58
+ sequence. The token used is the `cls_token`.
59
+
60
+ </Tip>
61
+
62
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
63
+ The end of sequence token.
64
+
65
+ <Tip>
66
+
67
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
68
+ The token used is the `sep_token`.
69
+
70
+ </Tip>
71
+
72
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
73
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
74
+ sequence classification or for a text and a question for question answering. It is also used as the last
75
+ token of a sequence built with special tokens.
76
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
77
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
78
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
79
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
80
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
81
+ token instead.
82
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
83
+ The token used for padding, for example when batching sequences of different lengths.
84
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
85
+ The token used for masking values. This is the token used when training this model with masked language
86
+ modeling. This is the token which the model will try to predict.
87
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
88
+ Additional special tokens used by the tokenizer.
89
+ """
90
+
91
+ vocab_files_names = VOCAB_FILES_NAMES
92
+ model_input_names = ["input_ids", "attention_mask"]
93
+ slow_tokenizer_class = BarthezTokenizer
94
+
95
+ def __init__(
96
+ self,
97
+ vocab_file=None,
98
+ tokenizer_file=None,
99
+ bos_token="<s>",
100
+ eos_token="</s>",
101
+ sep_token="</s>",
102
+ cls_token="<s>",
103
+ unk_token="<unk>",
104
+ pad_token="<pad>",
105
+ mask_token="<mask>",
106
+ **kwargs,
107
+ ):
108
+ # Mask token behave like a normal word, i.e. include the space before it
109
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
110
+
111
+ super().__init__(
112
+ vocab_file,
113
+ tokenizer_file=tokenizer_file,
114
+ bos_token=bos_token,
115
+ eos_token=eos_token,
116
+ unk_token=unk_token,
117
+ sep_token=sep_token,
118
+ cls_token=cls_token,
119
+ pad_token=pad_token,
120
+ mask_token=mask_token,
121
+ **kwargs,
122
+ )
123
+
124
+ self.vocab_file = vocab_file
125
+
126
+ @property
127
+ def can_save_slow_tokenizer(self) -> bool:
128
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
129
+
130
+ def build_inputs_with_special_tokens(
131
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
132
+ ) -> List[int]:
133
+ """
134
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
135
+ adding special tokens. A BARThez sequence has the following format:
136
+
137
+ - single sequence: `<s> X </s>`
138
+ - pair of sequences: `<s> A </s></s> B </s>`
139
+
140
+ Args:
141
+ token_ids_0 (`List[int]`):
142
+ List of IDs to which the special tokens will be added.
143
+ token_ids_1 (`List[int]`, *optional*):
144
+ Optional second list of IDs for sequence pairs.
145
+
146
+ Returns:
147
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
148
+ """
149
+
150
+ if token_ids_1 is None:
151
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
152
+ cls = [self.cls_token_id]
153
+ sep = [self.sep_token_id]
154
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
155
+
156
+ def create_token_type_ids_from_sequences(
157
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
158
+ ) -> List[int]:
159
+ """
160
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task.
161
+
162
+ Args:
163
+ token_ids_0 (`List[int]`):
164
+ List of IDs.
165
+ token_ids_1 (`List[int]`, *optional*):
166
+ Optional second list of IDs for sequence pairs.
167
+
168
+ Returns:
169
+ `List[int]`: List of zeros.
170
+ """
171
+ sep = [self.sep_token_id]
172
+ cls = [self.cls_token_id]
173
+
174
+ if token_ids_1 is None:
175
+ return len(cls + token_ids_0 + sep) * [0]
176
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
177
+
178
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
179
+ if not self.can_save_slow_tokenizer:
180
+ raise ValueError(
181
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
182
+ "tokenizer."
183
+ )
184
+
185
+ if not os.path.isdir(save_directory):
186
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
187
+ return
188
+ out_vocab_file = os.path.join(
189
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
190
+ )
191
+
192
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
193
+ copyfile(self.vocab_file, out_vocab_file)
194
+
195
+ return (out_vocab_file,)
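To make the special-token layout documented in `build_inputs_with_special_tokens` concrete, a brief sketch follows; it assumes the public `moussaKam/barthez` checkpoint is reachable on the Hub.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("moussaKam/barthez")  # assumed public BARThez checkpoint

single = tok("Bonjour tout le monde")["input_ids"]
# Single-sequence layout: "<s> X </s>"
assert single[0] == tok.cls_token_id and single[-1] == tok.sep_token_id

pair = tok("Bonjour", "le monde")["input_ids"]
# Pair layout: "<s> A </s></s> B </s>" -> the separator id appears three times.
assert pair.count(tok.sep_token_id) == 3
```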
llmeval-env/lib/python3.10/site-packages/transformers/models/bigbird_pegasus/__pycache__/modeling_bigbird_pegasus.cpython-310.pyc ADDED
Binary file (85.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__init__.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_imagegpt": ["IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ImageGPTConfig", "ImageGPTOnnxConfig"]
22
+ }
23
+
24
+ try:
25
+ if not is_vision_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["feature_extraction_imagegpt"] = ["ImageGPTFeatureExtractor"]
31
+ _import_structure["image_processing_imagegpt"] = ["ImageGPTImageProcessor"]
32
+
33
+ try:
34
+ if not is_torch_available():
35
+ raise OptionalDependencyNotAvailable()
36
+ except OptionalDependencyNotAvailable:
37
+ pass
38
+ else:
39
+ _import_structure["modeling_imagegpt"] = [
40
+ "IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
41
+ "ImageGPTForCausalImageModeling",
42
+ "ImageGPTForImageClassification",
43
+ "ImageGPTModel",
44
+ "ImageGPTPreTrainedModel",
45
+ "load_tf_weights_in_imagegpt",
46
+ ]
47
+
48
+
49
+ if TYPE_CHECKING:
50
+ from .configuration_imagegpt import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ImageGPTConfig, ImageGPTOnnxConfig
51
+
52
+ try:
53
+ if not is_vision_available():
54
+ raise OptionalDependencyNotAvailable()
55
+ except OptionalDependencyNotAvailable:
56
+ pass
57
+ else:
58
+ from .feature_extraction_imagegpt import ImageGPTFeatureExtractor
59
+ from .image_processing_imagegpt import ImageGPTImageProcessor
60
+
61
+ try:
62
+ if not is_torch_available():
63
+ raise OptionalDependencyNotAvailable()
64
+ except OptionalDependencyNotAvailable:
65
+ pass
66
+ else:
67
+ from .modeling_imagegpt import (
68
+ IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
69
+ ImageGPTForCausalImageModeling,
70
+ ImageGPTForImageClassification,
71
+ ImageGPTModel,
72
+ ImageGPTPreTrainedModel,
73
+ load_tf_weights_in_imagegpt,
74
+ )
75
+
76
+ else:
77
+ import sys
78
+
79
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
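A short sketch of the public surface declared above, as seen from user code (assumes `torch` and the vision dependencies are installed; the model is built from the default config with random weights):

```python
from transformers import ImageGPTConfig, ImageGPTForImageClassification, ImageGPTImageProcessor

config = ImageGPTConfig()  # defaults roughly mirror the small ImageGPT architecture
model = ImageGPTForImageClassification(config)
image_processor = ImageGPTImageProcessor()  # instantiation only; actual processing needs color clusters

print(model.config.n_embd, model.config.n_layer)
```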
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/configuration_imagegpt.cpython-310.pyc ADDED
Binary file (8.22 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/convert_imagegpt_original_tf2_to_pytorch.cpython-310.pyc ADDED
Binary file (1.83 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/feature_extraction_imagegpt.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/image_processing_imagegpt.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/__pycache__/modeling_imagegpt.cpython-310.pyc ADDED
Binary file (34.4 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/configuration_imagegpt.py ADDED
@@ -0,0 +1,199 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ OpenAI ImageGPT configuration"""
16
+
17
+ from collections import OrderedDict
18
+ from typing import TYPE_CHECKING, Any, Mapping, Optional
19
+
20
+ from ...configuration_utils import PretrainedConfig
21
+ from ...onnx import OnnxConfig
22
+ from ...utils import logging
23
+
24
+
25
+ if TYPE_CHECKING:
26
+ from ... import FeatureExtractionMixin, TensorType
27
+
28
+ logger = logging.get_logger(__name__)
29
+
30
+
31
+ from ..deprecated._archive_maps import IMAGEGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
32
+
33
+
34
+ class ImageGPTConfig(PretrainedConfig):
35
+ """
36
+ This is the configuration class to store the configuration of a [`ImageGPTModel`] or a [`TFImageGPTModel`]. It is
37
+ used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture.
38
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the ImageGPT
39
+ [openai/imagegpt-small](https://huggingface.co/openai/imagegpt-small) architecture.
40
+
41
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
42
+ documentation from [`PretrainedConfig`] for more information.
43
+
44
+
45
+ Args:
46
+ vocab_size (`int`, *optional*, defaults to 512):
47
+ Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the
48
+ `inputs_ids` passed when calling [`ImageGPTModel`] or [`TFImageGPTModel`].
49
+ n_positions (`int`, *optional*, defaults to 32*32):
50
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
51
+ just in case (e.g., 512 or 1024 or 2048).
52
+ n_embd (`int`, *optional*, defaults to 512):
53
+ Dimensionality of the embeddings and hidden states.
54
+ n_layer (`int`, *optional*, defaults to 24):
55
+ Number of hidden layers in the Transformer encoder.
56
+ n_head (`int`, *optional*, defaults to 8):
57
+ Number of attention heads for each attention layer in the Transformer encoder.
58
+ n_inner (`int`, *optional*, defaults to None):
59
+ Dimensionality of the inner feed-forward layers. `None` will set it to 4 times n_embd
60
+ activation_function (`str`, *optional*, defaults to `"quick_gelu"`):
61
+ Activation function (can be one of the activation functions defined in src/transformers/activations.py).
62
+ Defaults to "quick_gelu".
63
+ resid_pdrop (`float`, *optional*, defaults to 0.1):
64
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
65
+ embd_pdrop (`int`, *optional*, defaults to 0.1):
66
+ The dropout ratio for the embeddings.
67
+ attn_pdrop (`float`, *optional*, defaults to 0.1):
68
+ The dropout ratio for the attention.
69
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-5):
70
+ The epsilon to use in the layer normalization layers.
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ scale_attn_weights (`bool`, *optional*, defaults to `True`):
74
+ Scale attention weights by dividing by sqrt(hidden_size).
75
+ use_cache (`bool`, *optional*, defaults to `True`):
76
+ Whether or not the model should return the last key/values attentions (not used by all models).
77
+ scale_attn_by_inverse_layer_idx (`bool`, *optional*, defaults to `False`):
78
+ Whether to additionally scale attention weights by `1 / (layer_idx + 1)`.
79
+ reorder_and_upcast_attn (`bool`, *optional*, defaults to `False`):
80
+ Whether to scale keys (K) prior to computing attention (dot-product) and upcast attention
81
+ dot-product/softmax to float() when training with mixed precision.
82
+
83
+ Example:
84
+
85
+ ```python
86
+ >>> from transformers import ImageGPTConfig, ImageGPTModel
87
+
88
+ >>> # Initializing an ImageGPT configuration
89
+ >>> configuration = ImageGPTConfig()
90
+
91
+ >>> # Initializing a model (with random weights) from the configuration
92
+ >>> model = ImageGPTModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "imagegpt"
99
+ keys_to_ignore_at_inference = ["past_key_values"]
100
+ attribute_map = {
101
+ "hidden_size": "n_embd",
102
+ "max_position_embeddings": "n_positions",
103
+ "num_attention_heads": "n_head",
104
+ "num_hidden_layers": "n_layer",
105
+ }
106
+
107
+ def __init__(
108
+ self,
109
+ vocab_size=512 + 1, # add one for start of sentence (sos) token
110
+ n_positions=32 * 32,
111
+ n_embd=512,
112
+ n_layer=24,
113
+ n_head=8,
114
+ n_inner=None,
115
+ activation_function="quick_gelu",
116
+ resid_pdrop=0.1,
117
+ embd_pdrop=0.1,
118
+ attn_pdrop=0.1,
119
+ layer_norm_epsilon=1e-5,
120
+ initializer_range=0.02,
121
+ scale_attn_weights=True,
122
+ use_cache=True,
123
+ tie_word_embeddings=False,
124
+ scale_attn_by_inverse_layer_idx=False,
125
+ reorder_and_upcast_attn=False,
126
+ **kwargs,
127
+ ):
128
+ self.vocab_size = vocab_size
129
+ self.n_positions = n_positions
130
+ self.n_embd = n_embd
131
+ self.n_layer = n_layer
132
+ self.n_head = n_head
133
+ self.n_inner = n_inner
134
+ self.activation_function = activation_function
135
+ self.resid_pdrop = resid_pdrop
136
+ self.embd_pdrop = embd_pdrop
137
+ self.attn_pdrop = attn_pdrop
138
+ self.layer_norm_epsilon = layer_norm_epsilon
139
+ self.initializer_range = initializer_range
140
+ self.scale_attn_weights = scale_attn_weights
141
+ self.use_cache = use_cache
142
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
143
+ self.reorder_and_upcast_attn = reorder_and_upcast_attn
144
+ self.tie_word_embeddings = tie_word_embeddings
145
+
146
+ super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
147
+
148
+
149
+ class ImageGPTOnnxConfig(OnnxConfig):
150
+ @property
151
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
152
+ return OrderedDict(
153
+ [
154
+ ("input_ids", {0: "batch", 1: "sequence"}),
155
+ ]
156
+ )
157
+
158
+ def generate_dummy_inputs(
159
+ self,
160
+ preprocessor: "FeatureExtractionMixin",
161
+ batch_size: int = 1,
162
+ seq_length: int = -1,
163
+ is_pair: bool = False,
164
+ framework: Optional["TensorType"] = None,
165
+ num_channels: int = 3,
166
+ image_width: int = 32,
167
+ image_height: int = 32,
168
+ ) -> Mapping[str, Any]:
169
+ """
170
+ Generate inputs to provide to the ONNX exporter for the specific framework
171
+
172
+ Args:
173
+ preprocessor ([`PreTrainedTokenizerBase`] or [`FeatureExtractionMixin`]):
174
+ The preprocessor associated with this model configuration.
175
+ batch_size (`int`, *optional*, defaults to 1):
176
+ The batch size to export the model for.
177
+ num_choices (`int`, *optional*, defaults to -1):
178
+ The number of candidate answers provided for multiple choice task (-1 means dynamic axis).
179
+ seq_length (`int`, *optional*, defaults to -1):
180
+ The sequence length to export the model for (-1 means dynamic axis).
181
+ is_pair (`bool`, *optional*, defaults to `False`):
182
+ Indicate if the input is a pair (sentence 1, sentence 2)
183
+ framework (`TensorType`, *optional*, defaults to `None`):
184
+ The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for.
185
+ num_channels (`int`, *optional*, defaults to 3):
186
+ The number of channels of the generated images.
187
+ image_width (`int`, *optional*, defaults to 32):
188
+ The width of the generated images.
189
+ image_height (`int`, *optional*, defaults to 32):
190
+ The height of the generated images.
191
+
192
+ Returns:
193
+ Mapping[str, Tensor] holding the kwargs to provide to the model's forward function
194
+ """
195
+
196
+ input_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
197
+ inputs = dict(preprocessor(images=input_image, return_tensors=framework))
198
+
199
+ return inputs
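For context, `generate_dummy_inputs` above builds its example batch by generating random PIL images and running them through the preprocessor. A hedged sketch of calling it directly (the cluster palette and sizes here are illustrative assumptions, not values shipped with any checkpoint):

```python
# Sketch: producing dummy ONNX-export inputs with ImageGPTOnnxConfig.
import numpy as np
from transformers import ImageGPTConfig, ImageGPTImageProcessor
from transformers.models.imagegpt import ImageGPTOnnxConfig

config = ImageGPTConfig()
onnx_config = ImageGPTOnnxConfig(config)

# 512 random RGB centroids stand in for the real colour clusters of a checkpoint.
clusters = np.random.randint(0, 256, size=(config.vocab_size - 1, 3))
processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 32, "width": 32})

dummy = onnx_config.generate_dummy_inputs(processor, batch_size=1, framework=None)
print(list(dummy.keys()))  # expected to contain "input_ids"
```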
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/convert_imagegpt_original_tf2_to_pytorch.py ADDED
@@ -0,0 +1,72 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert OpenAI Image GPT checkpoints."""
16
+
17
+
18
+ import argparse
19
+
20
+ import torch
21
+
22
+ from transformers import ImageGPTConfig, ImageGPTForCausalImageModeling, load_tf_weights_in_imagegpt
23
+ from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
24
+
25
+
26
+ logging.set_verbosity_info()
27
+
28
+
29
+ def convert_imagegpt_checkpoint_to_pytorch(imagegpt_checkpoint_path, model_size, pytorch_dump_folder_path):
30
+ # Construct configuration depending on size
31
+ MODELS = {"small": (512, 8, 24), "medium": (1024, 8, 36), "large": (1536, 16, 48)}
32
+ n_embd, n_head, n_layer = MODELS[model_size] # set model hyperparameters
33
+ config = ImageGPTConfig(n_embd=n_embd, n_layer=n_layer, n_head=n_head)
34
+ model = ImageGPTForCausalImageModeling(config)
35
+
36
+ # Load weights from numpy
37
+ load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path)
38
+
39
+ # Save pytorch-model
40
+ pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
41
+ pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
42
+ print(f"Save PyTorch model to {pytorch_weights_dump_path}")
43
+ torch.save(model.state_dict(), pytorch_weights_dump_path)
44
+ print(f"Save configuration file to {pytorch_config_dump_path}")
45
+ with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
46
+ f.write(config.to_json_string())
47
+
48
+
49
+ if __name__ == "__main__":
50
+ parser = argparse.ArgumentParser()
51
+ # Required parameters
52
+ parser.add_argument(
53
+ "--imagegpt_checkpoint_path",
54
+ default=None,
55
+ type=str,
56
+ required=True,
57
+ help="Path to the TensorFlow checkpoint path.",
58
+ )
59
+ parser.add_argument(
60
+ "--model_size",
61
+ default=None,
62
+ type=str,
63
+ required=True,
64
+ help="Size of the model (can be either 'small', 'medium' or 'large').",
65
+ )
66
+ parser.add_argument(
67
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
68
+ )
69
+ args = parser.parse_args()
70
+ convert_imagegpt_checkpoint_to_pytorch(
71
+ args.imagegpt_checkpoint_path, args.model_size, args.pytorch_dump_folder_path
72
+ )
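The script above is meant to be run from the command line via argparse; a hedged sketch of invoking the same conversion programmatically (both paths below are placeholders, and a TensorFlow install plus a real iGPT checkpoint are assumed):

```python
# Sketch: calling the converter directly instead of via the CLI.
from transformers.models.imagegpt.convert_imagegpt_original_tf2_to_pytorch import (
    convert_imagegpt_checkpoint_to_pytorch,
)

convert_imagegpt_checkpoint_to_pytorch(
    imagegpt_checkpoint_path="/path/to/tf_checkpoint",  # placeholder TF checkpoint prefix
    model_size="small",                                 # "small" (512, 8, 24), "medium" or "large"
    pytorch_dump_folder_path="/path/to/output_dir",     # placeholder output directory
)
```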
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/feature_extraction_imagegpt.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for ImageGPT."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_imagegpt import ImageGPTImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
30
+ " Please use ImageGPTImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
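As a small illustration of the deprecation shim above (a sketch; the two-colour palette is a made-up placeholder), instantiating the old class still works but emits a `FutureWarning`:

```python
# Sketch: ImageGPTFeatureExtractor is just ImageGPTImageProcessor plus a deprecation warning.
import warnings
from transformers import ImageGPTFeatureExtractor

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    extractor = ImageGPTFeatureExtractor(clusters=[[0, 0, 0], [255, 255, 255]])

print(any(issubclass(w.category, FutureWarning) for w in caught))  # True
print(extractor.__class__.__mro__[1].__name__)                     # ImageGPTImageProcessor
```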
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/image_processing_imagegpt.py ADDED
@@ -0,0 +1,314 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for ImageGPT."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import rescale, resize, to_channel_dimension_format
23
+ from ...image_utils import (
24
+ ChannelDimension,
25
+ ImageInput,
26
+ PILImageResampling,
27
+ infer_channel_dimension_format,
28
+ is_scaled_image,
29
+ make_list_of_images,
30
+ to_numpy_array,
31
+ valid_images,
32
+ validate_kwargs,
33
+ validate_preprocess_arguments,
34
+ )
35
+ from ...utils import TensorType, is_vision_available, logging
36
+
37
+
38
+ if is_vision_available():
39
+ import PIL
40
+
41
+
42
+ logger = logging.get_logger(__name__)
43
+
44
+
45
+ def squared_euclidean_distance(a, b):
46
+ b = b.T
47
+ a2 = np.sum(np.square(a), axis=1)
48
+ b2 = np.sum(np.square(b), axis=0)
49
+ ab = np.matmul(a, b)
50
+ d = a2[:, None] - 2 * ab + b2[None, :]
51
+ return d
52
+
53
+
54
+ def color_quantize(x, clusters):
55
+ x = x.reshape(-1, 3)
56
+ d = squared_euclidean_distance(x, clusters)
57
+ return np.argmin(d, axis=1)
58
+
59
+
60
+ class ImageGPTImageProcessor(BaseImageProcessor):
61
+ r"""
62
+ Constructs an ImageGPT image processor. This image processor can be used to resize images to a smaller resolution
63
+ (such as 32x32 or 64x64), normalize them and finally color quantize them to obtain sequences of "pixel values"
64
+ (color clusters).
65
+
66
+ Args:
67
+ clusters (`np.ndarray` or `List[List[int]]`, *optional*):
68
+ The color clusters to use, of shape `(n_clusters, 3)` when color quantizing. Can be overridden by `clusters`
69
+ in `preprocess`.
70
+ do_resize (`bool`, *optional*, defaults to `True`):
71
+ Whether to resize the image's dimensions to `(size["height"], size["width"])`. Can be overridden by
72
+ `do_resize` in `preprocess`.
73
+ size (`Dict[str, int]` *optional*, defaults to `{"height": 256, "width": 256}`):
74
+ Size of the image after resizing. Can be overridden by `size` in `preprocess`.
75
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
76
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`.
77
+ do_normalize (`bool`, *optional*, defaults to `True`):
78
+ Whether to normalize the image pixel value to between [-1, 1]. Can be overridden by `do_normalize` in
79
+ `preprocess`.
80
+ do_color_quantize (`bool`, *optional*, defaults to `True`):
81
+ Whether to color quantize the image. Can be overridden by `do_color_quantize` in `preprocess`.
82
+ """
83
+
84
+ model_input_names = ["pixel_values"]
85
+
86
+ def __init__(
87
+ self,
88
+ # clusters is a first argument to maintain backwards compatibility with the old ImageGPTImageProcessor
89
+ clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
90
+ do_resize: bool = True,
91
+ size: Dict[str, int] = None,
92
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
93
+ do_normalize: bool = True,
94
+ do_color_quantize: bool = True,
95
+ **kwargs,
96
+ ) -> None:
97
+ super().__init__(**kwargs)
98
+ size = size if size is not None else {"height": 256, "width": 256}
99
+ size = get_size_dict(size)
100
+ self.clusters = np.array(clusters) if clusters is not None else None
101
+ self.do_resize = do_resize
102
+ self.size = size
103
+ self.resample = resample
104
+ self.do_normalize = do_normalize
105
+ self.do_color_quantize = do_color_quantize
106
+ self._valid_processor_keys = [
107
+ "images",
108
+ "do_resize",
109
+ "size",
110
+ "resample",
111
+ "do_normalize",
112
+ "do_color_quantize",
113
+ "clusters",
114
+ "return_tensors",
115
+ "data_format",
116
+ "input_data_format",
117
+ ]
118
+
119
+ # Copied from transformers.models.vit.image_processing_vit.ViTImageProcessor.resize
120
+ def resize(
121
+ self,
122
+ image: np.ndarray,
123
+ size: Dict[str, int],
124
+ resample: PILImageResampling = PILImageResampling.BILINEAR,
125
+ data_format: Optional[Union[str, ChannelDimension]] = None,
126
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
127
+ **kwargs,
128
+ ) -> np.ndarray:
129
+ """
130
+ Resize an image to `(size["height"], size["width"])`.
131
+
132
+ Args:
133
+ image (`np.ndarray`):
134
+ Image to resize.
135
+ size (`Dict[str, int]`):
136
+ Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
137
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):
138
+ `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
139
+ data_format (`ChannelDimension` or `str`, *optional*):
140
+ The channel dimension format for the output image. If unset, the channel dimension format of the input
141
+ image is used. Can be one of:
142
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
143
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
144
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
145
+ input_data_format (`ChannelDimension` or `str`, *optional*):
146
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
147
+ from the input image. Can be one of:
148
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
149
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
150
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
151
+
152
+ Returns:
153
+ `np.ndarray`: The resized image.
154
+ """
155
+ size = get_size_dict(size)
156
+ if "height" not in size or "width" not in size:
157
+ raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
158
+ output_size = (size["height"], size["width"])
159
+ return resize(
160
+ image,
161
+ size=output_size,
162
+ resample=resample,
163
+ data_format=data_format,
164
+ input_data_format=input_data_format,
165
+ **kwargs,
166
+ )
167
+
168
+ def normalize(
169
+ self,
170
+ image: np.ndarray,
171
+ data_format: Optional[Union[str, ChannelDimension]] = None,
172
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
173
+ ) -> np.ndarray:
174
+ """
175
+ Normalizes an images' pixel values to between [-1, 1].
176
+
177
+ Args:
178
+ image (`np.ndarray`):
179
+ Image to normalize.
180
+ data_format (`str` or `ChannelDimension`, *optional*):
181
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
182
+ input_data_format (`ChannelDimension` or `str`, *optional*):
183
+ The channel dimension format of the input image. If not provided, it will be inferred.
184
+ """
185
+ image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)
186
+ image = image - 1
187
+ return image
188
+
189
+ def preprocess(
190
+ self,
191
+ images: ImageInput,
192
+ do_resize: bool = None,
193
+ size: Dict[str, int] = None,
194
+ resample: PILImageResampling = None,
195
+ do_normalize: bool = None,
196
+ do_color_quantize: Optional[bool] = None,
197
+ clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
198
+ return_tensors: Optional[Union[str, TensorType]] = None,
199
+ data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
200
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
201
+ **kwargs,
202
+ ) -> PIL.Image.Image:
203
+ """
204
+ Preprocess an image or batch of images.
205
+
206
+ Args:
207
+ images (`ImageInput`):
208
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
209
+ passing in images with pixel values between 0 and 1, set `do_normalize=False`.
210
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
211
+ Whether to resize the image.
212
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
213
+ Size of the image after resizing.
214
+ resample (`int`, *optional*, defaults to `self.resample`):
215
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
216
+ has an effect if `do_resize` is set to `True`.
217
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
218
+ Whether to normalize the image.
219
+ do_color_quantize (`bool`, *optional*, defaults to `self.do_color_quantize`):
220
+ Whether to color quantize the image.
221
+ clusters (`np.ndarray` or `List[List[int]]`, *optional*, defaults to `self.clusters`):
222
+ Clusters used to quantize the image of shape `(n_clusters, 3)`. Only has an effect if
223
+ `do_color_quantize` is set to `True`.
224
+ return_tensors (`str` or `TensorType`, *optional*):
225
+ The type of tensors to return. Can be one of:
226
+ - Unset: Return a list of `np.ndarray`.
227
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
228
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
229
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
230
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
231
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
232
+ The channel dimension format for the output image. Can be one of:
233
+ - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
234
+ - `ChannelDimension.LAST`: image in (height, width, num_channels) format.
235
+ Only has an effect if `do_color_quantize` is set to `False`.
236
+ input_data_format (`ChannelDimension` or `str`, *optional*):
237
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
238
+ from the input image. Can be one of:
239
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
240
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
241
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
242
+ """
243
+ do_resize = do_resize if do_resize is not None else self.do_resize
244
+ size = size if size is not None else self.size
245
+ size = get_size_dict(size)
246
+ resample = resample if resample is not None else self.resample
247
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
248
+ do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
249
+ clusters = clusters if clusters is not None else self.clusters
250
+ clusters = np.array(clusters)
251
+
252
+ images = make_list_of_images(images)
253
+
254
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
255
+
256
+ if not valid_images(images):
257
+ raise ValueError(
258
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
259
+ "torch.Tensor, tf.Tensor or jax.ndarray."
260
+ )
261
+
262
+ # Here, normalize() divides pixel values by a constant factor,
263
+ # hence the method does not need image_mean and image_std.
264
+ validate_preprocess_arguments(
265
+ do_resize=do_resize,
266
+ size=size,
267
+ resample=resample,
268
+ )
269
+
270
+ if do_color_quantize and clusters is None:
271
+ raise ValueError("Clusters must be specified if do_color_quantize is True.")
272
+
273
+ # All transformations expect numpy arrays.
274
+ images = [to_numpy_array(image) for image in images]
275
+
276
+ if is_scaled_image(images[0]) and do_normalize:
277
+ logger.warning_once(
278
+ "It looks like you are trying to rescale already rescaled images. If you wish to do this, "
279
+ "make sure to set `do_normalize` to `False` and that pixel values are between [-1, 1].",
280
+ )
281
+
282
+ if input_data_format is None:
283
+ # We assume that all images have the same channel dimension format.
284
+ input_data_format = infer_channel_dimension_format(images[0])
285
+
286
+ if do_resize:
287
+ images = [
288
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
289
+ for image in images
290
+ ]
291
+
292
+ if do_normalize:
293
+ images = [self.normalize(image=image, input_data_format=input_data_format) for image in images]
294
+
295
+ if do_color_quantize:
296
+ images = [to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format) for image in images]
297
+ # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
298
+ images = np.array(images)
299
+ images = color_quantize(images, clusters).reshape(images.shape[:-1])
300
+
301
+ # flatten to (batch_size, height*width)
302
+ batch_size = images.shape[0]
303
+ images = images.reshape(batch_size, -1)
304
+
305
+ # We need to convert back to a list of images to keep consistent behaviour across processors.
306
+ images = list(images)
307
+ else:
308
+ images = [
309
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)
310
+ for image in images
311
+ ]
312
+
313
+ data = {"input_ids": images}
314
+ return BatchFeature(data=data, tensor_type=return_tensors)
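To make the pipeline implemented above concrete (resize, rescale to [-1, 1], colour-quantize, flatten), here is a hedged usage sketch; the two-colour palette is a stand-in for the 512 clusters that real ImageGPT checkpoints ship with:

```python
# Sketch: turning an RGB image into a flat sequence of cluster ids ("input_ids").
import numpy as np
from PIL import Image
from transformers import ImageGPTImageProcessor

clusters = np.array([[0, 0, 0], [255, 255, 255]])  # placeholder palette
processor = ImageGPTImageProcessor(clusters=clusters, size={"height": 32, "width": 32})

image = Image.fromarray(np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8))
encoding = processor(images=image, return_tensors="np")

print(encoding["input_ids"].shape)                  # (1, 1024) == (batch_size, 32 * 32)
print(encoding["input_ids"].max() < len(clusters))  # every token indexes a cluster
```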
llmeval-env/lib/python3.10/site-packages/transformers/models/imagegpt/modeling_imagegpt.py ADDED
@@ -0,0 +1,1200 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OpenAI Team Authors and HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch OpenAI ImageGPT model."""
16
+
17
+ import math
18
+ import os
19
+ import warnings
20
+ from typing import Any, Optional, Tuple, Union
21
+
22
+ import torch
23
+ import torch.utils.checkpoint
24
+ from torch import nn
25
+ from torch.cuda.amp import autocast
26
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
27
+
28
+ from ...activations import ACT2FN
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ SequenceClassifierOutputWithPast,
33
+ )
34
+ from ...modeling_utils import PreTrainedModel
35
+ from ...pytorch_utils import Conv1D, find_pruneable_heads_and_indices, prune_conv1d_layer
36
+ from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
37
+ from .configuration_imagegpt import ImageGPTConfig
38
+
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+ _CHECKPOINT_FOR_DOC = "openai/imagegpt-small"
43
+ _CONFIG_FOR_DOC = "ImageGPTConfig"
44
+
45
+
46
+ from ..deprecated._archive_maps import IMAGEGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
47
+
48
+
49
+ def load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path):
50
+ """
51
+ Load tf checkpoints in a pytorch model
52
+ """
53
+ try:
54
+ import re
55
+
56
+ import tensorflow as tf
57
+ except ImportError:
58
+ logger.error(
59
+ "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
60
+ "https://www.tensorflow.org/install/ for installation instructions."
61
+ )
62
+ raise
63
+ tf_path = os.path.abspath(imagegpt_checkpoint_path)
64
+ logger.info("Converting TensorFlow checkpoint from {}".format(tf_path))
65
+ # Load weights from TF model
66
+ init_vars = tf.train.list_variables(tf_path)
67
+ names = []
68
+ arrays = []
69
+
70
+ for name, shape in init_vars:
71
+ logger.info("Loading TF weight {} with shape {}".format(name, shape))
72
+ array = tf.train.load_variable(tf_path, name)
73
+ names.append(name)
74
+ arrays.append(array.squeeze())
75
+
76
+ for name, array in zip(names, arrays):
77
+ name = name[6:] # skip "model/"
78
+ name = name.split("/")
79
+
80
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
81
+ # which are not required for using pretrained model
82
+ if any(
83
+ n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
84
+ for n in name
85
+ ) or name[-1] in ["_step"]:
86
+ logger.info("Skipping {}".format("/".join(name)))
87
+ continue
88
+
89
+ pointer = model
90
+ if name[-1] not in ["wtet"]:
91
+ pointer = getattr(pointer, "transformer")
92
+
93
+ for m_name in name:
94
+ if re.fullmatch(r"[A-Za-z]+\d+", m_name):
95
+ scope_names = re.split(r"(\d+)", m_name)
96
+ else:
97
+ scope_names = [m_name]
98
+
99
+ if scope_names[0] == "w" or scope_names[0] == "g":
100
+ pointer = getattr(pointer, "weight")
101
+ elif scope_names[0] == "b":
102
+ pointer = getattr(pointer, "bias")
103
+ elif scope_names[0] == "wpe" or scope_names[0] == "wte":
104
+ pointer = getattr(pointer, scope_names[0])
105
+ pointer = getattr(pointer, "weight")
106
+ elif scope_names[0] in ["q_proj", "k_proj", "v_proj"]:
107
+ pointer = getattr(pointer, "c_attn")
108
+ pointer = getattr(pointer, "weight")
109
+ elif len(name) == 3 and name[1] == "attn" and scope_names[0] == "c_proj":
110
+ pointer = getattr(pointer, scope_names[0])
111
+ pointer = getattr(pointer, "weight")
112
+ elif scope_names[0] == "wtet":
113
+ pointer = getattr(pointer, "lm_head")
114
+ pointer = getattr(pointer, "weight")
115
+ elif scope_names[0] == "sos":
116
+ pointer = getattr(pointer, "wte")
117
+ pointer = getattr(pointer, "weight")
118
+ else:
119
+ pointer = getattr(pointer, scope_names[0])
120
+ if len(scope_names) >= 2:
121
+ num = int(scope_names[1])
122
+ pointer = pointer[num]
123
+
124
+ if len(name) > 1 and name[1] == "attn" or name[-1] == "wtet" or name[-1] == "sos" or name[-1] == "wte":
125
+ pass # array is used to initialize only part of the pointer so sizes won't match
126
+ else:
127
+ try:
128
+ assert pointer.shape == array.shape
129
+ except AssertionError as e:
130
+ e.args += (pointer.shape, array.shape)
131
+ raise
132
+
133
+ logger.info("Initialize PyTorch weight {}".format(name))
134
+
135
+ if name[-1] == "q_proj":
136
+ pointer.data[:, : config.n_embd] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T
137
+ elif name[-1] == "k_proj":
138
+ pointer.data[:, config.n_embd : 2 * config.n_embd] = torch.from_numpy(
139
+ array.reshape(config.n_embd, config.n_embd)
140
+ ).T
141
+ elif name[-1] == "v_proj":
142
+ pointer.data[:, 2 * config.n_embd :] = torch.from_numpy(array.reshape(config.n_embd, config.n_embd)).T
143
+ elif len(name) == 3 and name[1] == "attn" and name[2] == "c_proj":
144
+ pointer.data = torch.from_numpy(array.reshape(config.n_embd, config.n_embd))
145
+ elif name[-1] == "wtet":
146
+ pointer.data = torch.from_numpy(array)
147
+ elif name[-1] == "wte":
148
+ pointer.data[: config.vocab_size - 1, :] = torch.from_numpy(array)
149
+ elif name[-1] == "sos":
150
+ pointer.data[-1] = torch.from_numpy(array)
151
+ else:
152
+ pointer.data = torch.from_numpy(array)
153
+
154
+ return model
155
+
156
+
157
+ class ImageGPTLayerNorm(nn.Module):
158
+ def __init__(self, hidden_size: Tuple[int], eps: float = 1e-5):
159
+ super().__init__()
160
+ self.eps = eps
161
+ self.weight = nn.Parameter(torch.Tensor(hidden_size))
162
+
163
+ def forward(self, tensor: torch.Tensor) -> tuple:
164
+ # input is not mean centered
165
+ return (
166
+ tensor
167
+ / torch.sqrt(torch.mean(torch.square(tensor), axis=-1, keepdim=True) + self.eps)
168
+ * self.weight.data[..., :]
169
+ )
170
+
171
+
172
+ class ImageGPTAttention(nn.Module):
173
+ def __init__(self, config, is_cross_attention: Optional[bool] = False, layer_idx: Optional[int] = None):
174
+ super().__init__()
175
+
176
+ max_positions = config.max_position_embeddings
177
+ self.register_buffer(
178
+ "bias",
179
+ torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
180
+ 1, 1, max_positions, max_positions
181
+ ),
182
+ persistent=False,
183
+ )
184
+ self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False)
185
+
186
+ self.embed_dim = config.hidden_size
187
+ self.num_heads = config.num_attention_heads
188
+ self.head_dim = self.embed_dim // self.num_heads
189
+ self.split_size = self.embed_dim
190
+ if self.head_dim * self.num_heads != self.embed_dim:
191
+ raise ValueError(
192
+ f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
193
+ f" {self.num_heads})."
194
+ )
195
+
196
+ self.scale_attn_weights = config.scale_attn_weights
197
+ self.is_cross_attention = is_cross_attention
198
+
199
+ # Layer-wise attention scaling, reordering, and upcasting
200
+ self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
201
+ self.layer_idx = layer_idx
202
+ self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
203
+
204
+ if self.is_cross_attention:
205
+ self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
206
+ self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
207
+ else:
208
+ self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
209
+ self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
210
+
211
+ self.attn_dropout = nn.Dropout(config.attn_pdrop)
212
+ self.resid_dropout = nn.Dropout(config.resid_pdrop)
213
+
214
+ self.pruned_heads = set()
215
+
216
+ def prune_heads(self, heads):
217
+ if len(heads) == 0:
218
+ return
219
+ heads, index = find_pruneable_heads_and_indices(heads, self.num_heads, self.head_dim, self.pruned_heads)
220
+ index_attn = torch.cat([index, index + self.split_size, index + (2 * self.split_size)])
221
+
222
+ # Prune conv1d layers
223
+ self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
224
+ self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
225
+
226
+ # Update hyper params
227
+ self.split_size = (self.split_size // self.num_heads) * (self.num_heads - len(heads))
228
+ self.num_heads = self.num_heads - len(heads)
229
+ self.pruned_heads = self.pruned_heads.union(heads)
230
+
231
+ def _attn(self, query, key, value, attention_mask=None, head_mask=None):
232
+ attn_weights = torch.matmul(query, key.transpose(-1, -2))
233
+
234
+ if self.scale_attn_weights:
235
+ attn_weights = attn_weights / (float(value.size(-1)) ** 0.5)
236
+
237
+ # Layer-wise attention scaling
238
+ if self.scale_attn_by_inverse_layer_idx:
239
+ attn_weights = attn_weights / float(self.layer_idx + 1)
240
+
241
+ if not self.is_cross_attention:
242
+ # if only "normal" attention layer implements causal mask
243
+ query_length, key_length = query.size(-2), key.size(-2)
244
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
245
+ mask_value = torch.finfo(attn_weights.dtype).min
246
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
247
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
248
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
249
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
250
+
251
+ if attention_mask is not None:
252
+ # Apply the attention mask
253
+ attn_weights = attn_weights + attention_mask
254
+
255
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
256
+
257
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
258
+ attn_weights = attn_weights.type(value.dtype)
259
+ attn_weights = self.attn_dropout(attn_weights)
260
+
261
+ # Mask heads if we want to
262
+ if head_mask is not None:
263
+ attn_weights = attn_weights * head_mask
264
+
265
+ attn_output = torch.matmul(attn_weights, value)
266
+
267
+ return attn_output, attn_weights
268
+
269
+ def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None, head_mask=None):
270
+ # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
271
+ bsz, num_heads, q_seq_len, dk = query.size()
272
+ _, _, k_seq_len, _ = key.size()
273
+
274
+ # Preallocate attn_weights for `baddbmm`
275
+ attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)
276
+
277
+ # Compute Scale Factor
278
+ scale_factor = 1.0
279
+ if self.scale_attn_weights:
280
+ scale_factor /= float(value.size(-1)) ** 0.5
281
+
282
+ if self.scale_attn_by_inverse_layer_idx:
283
+ scale_factor /= float(self.layer_idx + 1)
284
+
285
+ # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
286
+ with autocast(enabled=False):
287
+ q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
288
+ attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
289
+ attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)
290
+
291
+ if not self.is_cross_attention:
292
+ # if only "normal" attention layer implements causal mask
293
+ query_length, key_length = query.size(-2), key.size(-2)
294
+ causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
295
+ mask_value = torch.finfo(attn_weights.dtype).min
296
+ # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
297
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
298
+ mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device)
299
+ attn_weights = torch.where(causal_mask, attn_weights, mask_value)
300
+
301
+ if attention_mask is not None:
302
+ # Apply the attention mask
303
+ attn_weights = attn_weights + attention_mask
304
+
305
+ attn_weights = nn.Softmax(dim=-1)(attn_weights)
306
+
307
+ # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
308
+ if attn_weights.dtype != torch.float32:
309
+ raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
310
+ attn_weights = attn_weights.type(value.dtype)
311
+ attn_weights = self.attn_dropout(attn_weights)
312
+
313
+ # Mask heads if we want to
314
+ if head_mask is not None:
315
+ attn_weights = attn_weights * head_mask
316
+
317
+ attn_output = torch.matmul(attn_weights, value)
318
+
319
+ return attn_output, attn_weights
320
+
321
+ def _split_heads(self, tensor, num_heads, attn_head_size):
322
+ """
323
+ Splits hidden_size dim into attn_head_size and num_heads
324
+ """
325
+ new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
326
+ tensor = tensor.view(*new_shape)
327
+ return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features)
328
+
329
+ def _merge_heads(self, tensor, num_heads, attn_head_size):
330
+ """
331
+ Merges attn_head_size dim and num_attn_heads dim into hidden_size
332
+ """
333
+ tensor = tensor.permute(0, 2, 1, 3).contiguous()
334
+ new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
335
+ return tensor.view(new_shape)
336
+
337
+ def forward(
338
+ self,
339
+ hidden_states: torch.Tensor,
340
+ layer_past: Optional[bool] = None,
341
+ attention_mask: Optional[torch.Tensor] = None,
342
+ head_mask: Optional[torch.Tensor] = None,
343
+ encoder_hidden_states: Optional[torch.Tensor] = None,
344
+ encoder_attention_mask: Optional[torch.Tensor] = None,
345
+ use_cache: Optional[bool] = False,
346
+ output_attentions: Optional[bool] = False,
347
+ ) -> tuple:
348
+ if encoder_hidden_states is not None:
349
+ if not hasattr(self, "q_attn"):
350
+ raise ValueError(
351
+ "If class is used as cross attention, the weights `q_attn` have to be defined. "
352
+ "Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`."
353
+ )
354
+
355
+ query = self.q_attn(hidden_states)
356
+ key, value = self.c_attn(encoder_hidden_states).split(self.split_size, dim=2)
357
+ attention_mask = encoder_attention_mask
358
+ else:
359
+ query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2)
360
+
361
+ query = self._split_heads(query, self.num_heads, self.head_dim)
362
+ key = self._split_heads(key, self.num_heads, self.head_dim)
363
+ value = self._split_heads(value, self.num_heads, self.head_dim)
364
+
365
+ if layer_past is not None:
366
+ past_key, past_value = layer_past
367
+ key = torch.cat((past_key, key), dim=-2)
368
+ value = torch.cat((past_value, value), dim=-2)
369
+
370
+ if use_cache is True:
371
+ present = (key, value)
372
+ else:
373
+ present = None
374
+
375
+ if self.reorder_and_upcast_attn:
376
+ attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask, head_mask)
377
+ else:
378
+ attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
379
+
380
+ attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
381
+ attn_output = self.c_proj(attn_output)
382
+ attn_output = self.resid_dropout(attn_output)
383
+
384
+ outputs = (attn_output, present)
385
+ if output_attentions:
386
+ outputs += (attn_weights,)
387
+
388
+ return outputs # a, present, (attentions)
389
+
390
+
391
+ class ImageGPTMLP(nn.Module):
392
+ def __init__(self, intermediate_size, config):
393
+ super().__init__()
394
+ embed_dim = config.hidden_size
395
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
396
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
397
+ self.act = ACT2FN[config.activation_function]
398
+ self.dropout = nn.Dropout(config.resid_pdrop)
399
+
400
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
401
+ hidden_states = self.c_fc(hidden_states)
402
+ hidden_states = self.act(hidden_states)
403
+ hidden_states = self.c_proj(hidden_states)
404
+ hidden_states = self.dropout(hidden_states)
405
+ return hidden_states
406
+
407
+
408
+ class ImageGPTBlock(nn.Module):
409
+ def __init__(self, config, layer_idx=None):
410
+ super().__init__()
411
+ hidden_size = config.hidden_size
412
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
413
+
414
+ self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
415
+ self.attn = ImageGPTAttention(config, layer_idx=layer_idx)
416
+ self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
417
+
418
+ if config.add_cross_attention:
419
+ self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx)
420
+ self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
421
+
422
+ self.mlp = ImageGPTMLP(inner_dim, config)
423
+
424
+ def forward(
425
+ self,
426
+ hidden_states: torch.Tensor,
427
+ layer_past: Optional[bool] = None,
428
+ attention_mask: Optional[torch.Tensor] = None,
429
+ head_mask: Optional[torch.Tensor] = None,
430
+ encoder_hidden_states: Optional[torch.Tensor] = None,
431
+ encoder_attention_mask: Optional[torch.Tensor] = None,
432
+ use_cache: Optional[bool] = False,
433
+ output_attentions: Optional[bool] = False,
434
+ ) -> tuple:
435
+ residual = hidden_states
436
+ hidden_states = self.ln_1(hidden_states)
437
+ attn_outputs = self.attn(
438
+ hidden_states,
439
+ layer_past=layer_past,
440
+ attention_mask=attention_mask,
441
+ head_mask=head_mask,
442
+ use_cache=use_cache,
443
+ output_attentions=output_attentions,
444
+ )
445
+ attn_output = attn_outputs[0] # output_attn: a, present, (attentions)
446
+ outputs = attn_outputs[1:]
447
+ # residual connection
448
+ hidden_states = attn_output + residual
449
+
450
+ if encoder_hidden_states is not None:
451
+ # add one self-attention block for cross-attention
452
+ if not hasattr(self, "crossattention"):
453
+ raise ValueError(
454
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
455
+ "cross-attention layers by setting `config.add_cross_attention=True`"
456
+ )
457
+ residual = hidden_states
458
+ hidden_states = self.ln_cross_attn(hidden_states)
459
+ cross_attn_outputs = self.crossattention(
460
+ hidden_states,
461
+ attention_mask=attention_mask,
462
+ head_mask=head_mask,
463
+ encoder_hidden_states=encoder_hidden_states,
464
+ encoder_attention_mask=encoder_attention_mask,
465
+ output_attentions=output_attentions,
466
+ )
467
+ attn_output = cross_attn_outputs[0]
468
+ # residual connection
469
+ hidden_states = residual + attn_output
470
+ outputs = outputs + cross_attn_outputs[2:] # add cross attentions if we output attention weights
471
+
472
+ residual = hidden_states
473
+ hidden_states = self.ln_2(hidden_states)
474
+ feed_forward_hidden_states = self.mlp(hidden_states)
475
+ # residual connection
476
+ hidden_states = residual + feed_forward_hidden_states
477
+
478
+ outputs = (hidden_states,) + (outputs if use_cache else outputs[1:])
479
+
480
+ return outputs # hidden_states, present, (attentions, cross_attentions)
481
+
482
+
483
+ class ImageGPTPreTrainedModel(PreTrainedModel):
484
+ """
485
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
486
+ models.
487
+ """
488
+
489
+ config_class = ImageGPTConfig
490
+ load_tf_weights = load_tf_weights_in_imagegpt
491
+ base_model_prefix = "transformer"
492
+ main_input_name = "input_ids"
493
+ supports_gradient_checkpointing = True
494
+
495
+ def __init__(self, *inputs, **kwargs):
496
+ super().__init__(*inputs, **kwargs)
497
+
498
+ def _init_weights(self, module):
499
+ """Initialize the weights."""
500
+ if isinstance(module, (nn.Linear, Conv1D)):
501
+ # Slightly different from the TF version which uses truncated_normal for initialization
502
+ # cf https://github.com/pytorch/pytorch/pull/5617
503
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
504
+ if module.bias is not None:
505
+ module.bias.data.zero_()
506
+ elif isinstance(module, nn.Embedding):
507
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
508
+ if module.padding_idx is not None:
509
+ module.weight.data[module.padding_idx].zero_()
510
+ elif isinstance(module, ImageGPTLayerNorm):
511
+ module.weight.data.fill_(1.0)
512
+
513
+ # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
514
+ # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
515
+ # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
516
+ # > -- GPT-2 :: https://openai.com/blog/better-language-models/
517
+ #
518
+ # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
519
+ for name, p in module.named_parameters():
520
+ if "c_proj" in name and "weight" in name:
521
+ # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
522
+ p.data.normal_(mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.n_layer)))
523
+
524
+
525
+ IMAGEGPT_START_DOCSTRING = r"""
526
+
527
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
528
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
529
+ etc.)
530
+
531
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
532
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
533
+ and behavior.
534
+
535
+ Parameters:
536
+ config ([`ImageGPTConfig`]): Model configuration class with all the parameters of the model.
537
+ Initializing with a config file does not load the weights associated with the model, only the
538
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
539
+ """
540
+
541
+ IMAGEGPT_INPUTS_DOCSTRING = r"""
542
+ Args:
543
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
544
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
545
+ `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input
546
+ sequence tokens in the vocabulary.
547
+
548
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
549
+ `input_ids`.
550
+
551
+ Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
552
+
553
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
554
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
555
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
556
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
557
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
558
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
559
+
560
+ - 1 for tokens that are **not masked**,
561
+ - 0 for tokens that are **masked**.
562
+
563
+ [What are attention masks?](../glossary#attention-mask)
564
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
565
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
566
+ 1]`:
567
+
568
+ - 0 corresponds to a *sentence A* token,
569
+ - 1 corresponds to a *sentence B* token.
570
+
571
+ [What are token type IDs?](../glossary#token-type-ids)
572
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
573
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
574
+ config.max_position_embeddings - 1]`.
575
+
576
+ [What are position IDs?](../glossary#position-ids)
577
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
578
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
579
+
580
+ - 1 indicates the head is **not masked**,
581
+ - 0 indicates the head is **masked**.
582
+
583
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
584
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
585
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
586
+ model's internal embedding lookup matrix.
587
+
588
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
589
+ `past_key_values`).
590
+ use_cache (`bool`, *optional*):
591
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
592
+ `past_key_values`).
593
+ output_attentions (`bool`, *optional*):
594
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
595
+ tensors for more detail.
596
+ output_hidden_states (`bool`, *optional*):
597
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
598
+ more detail.
599
+ return_dict (`bool`, *optional*):
600
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
601
+ """
602
+
603
+
604
+ @add_start_docstrings(
605
+ "The bare ImageGPT Model transformer outputting raw hidden-states without any specific head on top.",
606
+ IMAGEGPT_START_DOCSTRING,
607
+ )
608
+ class ImageGPTModel(ImageGPTPreTrainedModel):
609
+ def __init__(self, config: ImageGPTConfig):
610
+ super().__init__(config)
611
+
612
+ self.embed_dim = config.hidden_size
613
+
614
+ self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
615
+ self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
616
+
617
+ self.drop = nn.Dropout(config.embd_pdrop)
618
+ self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
619
+ self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
620
+
621
+ # Model parallel
622
+ self.model_parallel = False
623
+ self.device_map = None
624
+ self.gradient_checkpointing = False
625
+ # Initialize weights and apply final processing
626
+ self.post_init()
627
+
628
+ def get_input_embeddings(self):
629
+ return self.wte
630
+
631
+ def set_input_embeddings(self, new_embeddings):
632
+ self.wte = new_embeddings
633
+
634
+ def _prune_heads(self, heads_to_prune):
635
+ """
636
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
637
+ """
638
+ for layer, heads in heads_to_prune.items():
639
+ self.h[layer].attn.prune_heads(heads)
640
+
641
+ @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING)
642
+ @replace_return_docstrings(output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC)
643
+ def forward(
644
+ self,
645
+ input_ids: Optional[torch.Tensor] = None,
646
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
647
+ attention_mask: Optional[torch.Tensor] = None,
648
+ token_type_ids: Optional[torch.Tensor] = None,
649
+ position_ids: Optional[torch.Tensor] = None,
650
+ head_mask: Optional[torch.Tensor] = None,
651
+ inputs_embeds: Optional[torch.Tensor] = None,
652
+ encoder_hidden_states: Optional[torch.Tensor] = None,
653
+ encoder_attention_mask: Optional[torch.Tensor] = None,
654
+ use_cache: Optional[bool] = None,
655
+ output_attentions: Optional[bool] = None,
656
+ output_hidden_states: Optional[bool] = None,
657
+ return_dict: Optional[bool] = None,
658
+ **kwargs: Any,
659
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
660
+ r"""
661
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
662
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
663
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
664
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
665
+
666
+ Returns:
667
+
668
+ Examples:
669
+
670
+ ```python
671
+ >>> from transformers import AutoImageProcessor, ImageGPTModel
672
+ >>> from PIL import Image
673
+ >>> import requests
674
+
675
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
676
+ >>> image = Image.open(requests.get(url, stream=True).raw)
677
+
678
+ >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
679
+ >>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small")
680
+
681
+ >>> inputs = image_processor(images=image, return_tensors="pt")
682
+ >>> outputs = model(**inputs)
683
+ >>> last_hidden_states = outputs.last_hidden_state
684
+ ```"""
685
+
686
+ if "pixel_values" in kwargs:
687
+ warnings.warn(
688
+ "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
689
+ " instead.",
690
+ FutureWarning,
691
+ )
692
+
693
+ if input_ids is not None:
694
+ raise ValueError(
695
+ "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`."
696
+ )
697
+
698
+ input_ids = kwargs.pop("pixel_values")
699
+
700
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
701
+ output_hidden_states = (
702
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
703
+ )
704
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
705
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
706
+
707
+ if input_ids is not None and inputs_embeds is not None:
708
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
709
+ elif input_ids is not None:
710
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
711
+ input_shape = input_ids.size()
712
+ input_ids = input_ids.view(-1, input_shape[-1])
713
+ batch_size = input_ids.shape[0]
714
+ elif inputs_embeds is not None:
715
+ input_shape = inputs_embeds.size()[:-1]
716
+ batch_size = inputs_embeds.shape[0]
717
+ else:
718
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
719
+
720
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
721
+
722
+ if token_type_ids is not None:
723
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
724
+
725
+ if past_key_values is None:
726
+ past_length = 0
727
+ past_key_values = tuple([None] * len(self.h))
728
+ else:
729
+ past_length = past_key_values[0][0].size(-2)
730
+ if position_ids is None:
731
+ position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device)
732
+ position_ids = position_ids.unsqueeze(0)
733
+
734
+ # ImageGPTAttention mask.
735
+ if attention_mask is not None:
736
+ if batch_size <= 0:
737
+ raise ValueError("batch_size has to be defined and > 0")
738
+ attention_mask = attention_mask.view(batch_size, -1)
739
+ # We create a 3D attention mask from a 2D tensor mask.
740
+ # Sizes are [batch_size, 1, 1, to_seq_length]
741
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
742
+ # this attention mask is simpler than the triangular masking of causal attention
743
+ # used in OpenAI GPT, we just need to prepare the broadcast dimension here.
744
+ attention_mask = attention_mask[:, None, None, :]
745
+
746
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
747
+ # masked positions, this operation will create a tensor which is 0.0 for
748
+ # positions we want to attend and the dtype's smallest value for masked positions.
749
+ # Since we are adding it to the raw scores before the softmax, this is
750
+ # effectively the same as removing these entirely.
751
+ attention_mask = attention_mask.to(dtype=self.dtype) # fp16 compatibility
752
+ attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min
753
+
754
+ # If a 2D or 3D attention mask is provided for the cross-attention
755
+ # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
756
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
757
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
758
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
759
+ if encoder_attention_mask is None:
760
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
761
+ encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
762
+ else:
763
+ encoder_attention_mask = None
764
+
765
+ # Prepare head mask if needed
766
+ # 1.0 in head_mask indicate we keep the head
767
+ # attention_probs has shape bsz x n_heads x N x N
768
+ # head_mask has shape n_layer x batch x n_heads x N x N
769
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
770
+
771
+ if inputs_embeds is None:
772
+ inputs_embeds = self.wte(input_ids)
773
+ position_embeds = self.wpe(position_ids)
774
+ hidden_states = inputs_embeds + position_embeds
775
+
776
+ if token_type_ids is not None:
777
+ token_type_embeds = self.wte(token_type_ids)
778
+ hidden_states = hidden_states + token_type_embeds
779
+
780
+ hidden_states = self.drop(hidden_states)
781
+
782
+ output_shape = input_shape + (hidden_states.size(-1),)
783
+
784
+ if self.gradient_checkpointing and self.training:
785
+ if use_cache:
786
+ logger.warning_once(
787
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
788
+ )
789
+ use_cache = False
790
+
791
+ presents = () if use_cache else None
792
+ all_self_attentions = () if output_attentions else None
793
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
794
+ all_hidden_states = () if output_hidden_states else None
795
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
796
+ # Model parallel
797
+ if self.model_parallel:
798
+ torch.cuda.set_device(hidden_states.device)
799
+ # Ensure layer_past is on same device as hidden_states (might not be correct)
800
+ if layer_past is not None:
801
+ layer_past = tuple(past_state.to(hidden_states.device) for past_state in layer_past)
802
+ # Ensure that attention_mask is always on the same device as hidden_states
803
+ if attention_mask is not None:
804
+ attention_mask = attention_mask.to(hidden_states.device)
805
+ if isinstance(head_mask, torch.Tensor):
806
+ head_mask = head_mask.to(hidden_states.device)
807
+ if output_hidden_states:
808
+ all_hidden_states = all_hidden_states + (hidden_states,)
809
+
810
+ if self.gradient_checkpointing and self.training:
811
+ outputs = self._gradient_checkpointing_func(
812
+ block.__call__,
813
+ hidden_states,
814
+ None,
815
+ attention_mask,
816
+ head_mask[i],
817
+ encoder_hidden_states,
818
+ encoder_attention_mask,
819
+ use_cache,
820
+ output_attentions,
821
+ )
822
+ else:
823
+ outputs = block(
824
+ hidden_states,
825
+ layer_past=layer_past,
826
+ attention_mask=attention_mask,
827
+ head_mask=head_mask[i],
828
+ encoder_hidden_states=encoder_hidden_states,
829
+ encoder_attention_mask=encoder_attention_mask,
830
+ use_cache=use_cache,
831
+ output_attentions=output_attentions,
832
+ )
833
+
834
+ hidden_states = outputs[0]
835
+ if use_cache is True:
836
+ presents = presents + (outputs[1],)
837
+
838
+ if output_attentions:
839
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
840
+ if self.config.add_cross_attention:
841
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
842
+
843
+ # Model Parallel: If it's the last layer for that device, put things on the next device
844
+ if self.model_parallel:
845
+ for k, v in self.device_map.items():
846
+ if i == v[-1] and "cuda:" + str(k) != self.last_device:
847
+ hidden_states = hidden_states.to("cuda:" + str(k + 1))
848
+
849
+ hidden_states = self.ln_f(hidden_states)
850
+
851
+ hidden_states = hidden_states.view(*output_shape)
852
+ # Add last hidden state
853
+ if output_hidden_states:
854
+ all_hidden_states = all_hidden_states + (hidden_states,)
855
+
856
+ if not return_dict:
857
+ return tuple(
858
+ v
859
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
860
+ if v is not None
861
+ )
862
+
863
+ return BaseModelOutputWithPastAndCrossAttentions(
864
+ last_hidden_state=hidden_states,
865
+ past_key_values=presents,
866
+ hidden_states=all_hidden_states,
867
+ attentions=all_self_attentions,
868
+ cross_attentions=all_cross_attentions,
869
+ )
870
+
871
+
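The mask handling in `ImageGPTModel.forward` above converts a 2D padding mask into an additive bias that is summed onto the raw attention scores before the softmax. A minimal sketch of that conversion, assuming only PyTorch and an illustrative single-sequence batch:

```python
import torch

# 2D padding mask: 1 = attend, 0 = masked (last position is padding)
attention_mask = torch.tensor([[1, 1, 1, 0]])

# Broadcastable shape (batch_size, 1, 1, to_seq_length), as prepared in the forward pass
mask = attention_mask[:, None, None, :].to(torch.float32)

# 0.0 where we attend, the dtype's most negative value where we mask; adding this to the
# attention scores before the softmax effectively removes the masked positions
additive_bias = (1.0 - mask) * torch.finfo(torch.float32).min

print(additive_bias.shape)  # torch.Size([1, 1, 1, 4])
```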
872
+ @add_start_docstrings(
873
+ """
874
+ The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
875
+ embeddings).
876
+ """,
877
+ IMAGEGPT_START_DOCSTRING,
878
+ )
879
+ class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel):
880
+ _tied_weights_keys = ["lm_head.weight"]
881
+
882
+ def __init__(self, config: ImageGPTConfig):
883
+ super().__init__(config)
884
+ self.transformer = ImageGPTModel(config)
885
+ self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False)
886
+
887
+ # Model parallel
888
+ self.model_parallel = False
889
+ self.device_map = None
890
+ # Initialize weights and apply final processing
891
+ self.post_init()
892
+
893
+ def get_output_embeddings(self):
894
+ return self.lm_head
895
+
896
+ def set_output_embeddings(self, new_embeddings):
897
+ self.lm_head = new_embeddings
898
+
899
+ def prepare_inputs_for_generation(self, input_ids: torch.Tensor, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, **kwargs):
900
+ token_type_ids = kwargs.get("token_type_ids", None)
901
+ # Omit tokens covered by past_key_values
902
+ if past_key_values:
903
+ past_length = past_key_values[0][0].shape[2]
904
+
905
+ # Some generation methods already pass only the last input ID
906
+ if input_ids.shape[1] > past_length:
907
+ remove_prefix_length = past_length
908
+ else:
909
+ # Default to old behavior: keep only final ID
910
+ remove_prefix_length = input_ids.shape[1] - 1
911
+
912
+ input_ids = input_ids[:, remove_prefix_length:]
913
+ if token_type_ids is not None:
914
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
915
+
916
+ attention_mask = kwargs.get("attention_mask", None)
917
+ position_ids = kwargs.get("position_ids", None)
918
+
919
+ if attention_mask is not None and position_ids is None:
920
+ # create position_ids on the fly for batch generation
921
+ position_ids = attention_mask.long().cumsum(-1) - 1
922
+ position_ids.masked_fill_(attention_mask == 0, 1)
923
+ if past_key_values:
924
+ position_ids = position_ids[:, -input_ids.shape[1] :]
925
+ else:
926
+ position_ids = None
927
+ return {
928
+ "input_ids": input_ids,
929
+ "past_key_values": past_key_values,
930
+ "use_cache": kwargs.get("use_cache"),
931
+ "position_ids": position_ids,
932
+ "attention_mask": attention_mask,
933
+ "token_type_ids": token_type_ids,
934
+ }
935
+
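The `prepare_inputs_for_generation` helper above rebuilds `position_ids` from the attention mask so that left-padded batch entries still receive contiguous positions for their real tokens. A tiny sketch of that cumsum trick, assuming PyTorch and a made-up mask:

```python
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1]])     # left-padded batch entry
position_ids = attention_mask.long().cumsum(-1) - 1  # running position count over real tokens
position_ids.masked_fill_(attention_mask == 0, 1)    # padded slots get a dummy position
print(position_ids)  # tensor([[1, 1, 0, 1, 2]])
```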
936
+ @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING)
937
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
938
+ def forward(
939
+ self,
940
+ input_ids: Optional[torch.Tensor] = None,
941
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
942
+ attention_mask: Optional[torch.Tensor] = None,
943
+ token_type_ids: Optional[torch.Tensor] = None,
944
+ position_ids: Optional[torch.Tensor] = None,
945
+ head_mask: Optional[torch.Tensor] = None,
946
+ inputs_embeds: Optional[torch.Tensor] = None,
947
+ encoder_hidden_states: Optional[torch.Tensor] = None,
948
+ encoder_attention_mask: Optional[torch.Tensor] = None,
949
+ labels: Optional[torch.Tensor] = None,
950
+ use_cache: Optional[bool] = None,
951
+ output_attentions: Optional[bool] = None,
952
+ output_hidden_states: Optional[bool] = None,
953
+ return_dict: Optional[bool] = None,
954
+ **kwargs: Any,
955
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
956
+ r"""
957
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
958
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
959
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
960
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
961
+
962
+ Returns:
963
+
964
+ Examples:
965
+
966
+ ```python
967
+ >>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling
968
+ >>> import torch
969
+ >>> import matplotlib.pyplot as plt
970
+ >>> import numpy as np
971
+
972
+ >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
973
+ >>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
974
+ >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
975
+ >>> model.to(device) # doctest: +IGNORE_RESULT
976
+
977
+ >>> # unconditional generation of 4 images
978
+ >>> batch_size = 4
979
+ >>> context = torch.full((batch_size, 1), model.config.vocab_size - 1) # initialize with SOS token
980
+ >>> context = context.to(device)
981
+ >>> output = model.generate(
982
+ ... input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40
983
+ ... )
984
+
985
+ >>> clusters = image_processor.clusters
986
+ >>> height = image_processor.size["height"]
987
+ >>> width = image_processor.size["width"]
988
+
989
+ >>> samples = output[:, 1:].cpu().detach().numpy()
990
+ >>> samples_img = [
991
+ ... np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples
992
+ ... ] # convert color cluster tokens back to pixels
993
+ >>> f, axes = plt.subplots(1, batch_size, dpi=300)
994
+
995
+ >>> for img, ax in zip(samples_img, axes): # doctest: +IGNORE_RESULT
996
+ ... ax.axis("off")
997
+ ... ax.imshow(img)
998
+ ```"""
999
+
1000
+ if "pixel_values" in kwargs:
1001
+ warnings.warn(
1002
+ "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
1003
+ " instead.",
1004
+ FutureWarning,
1005
+ )
1006
+
1007
+ if input_ids is not None:
1008
+ raise ValueError(
1009
+ "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`."
1010
+ )
1011
+
1012
+ input_ids = kwargs.pop("pixel_values")
1013
+
1014
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1015
+
1016
+ transformer_outputs = self.transformer(
1017
+ input_ids,
1018
+ past_key_values=past_key_values,
1019
+ attention_mask=attention_mask,
1020
+ token_type_ids=token_type_ids,
1021
+ position_ids=position_ids,
1022
+ head_mask=head_mask,
1023
+ inputs_embeds=inputs_embeds,
1024
+ encoder_hidden_states=encoder_hidden_states,
1025
+ encoder_attention_mask=encoder_attention_mask,
1026
+ use_cache=use_cache,
1027
+ output_attentions=output_attentions,
1028
+ output_hidden_states=output_hidden_states,
1029
+ return_dict=return_dict,
1030
+ )
1031
+ hidden_states = transformer_outputs[0]
1032
+
1033
+ lm_logits = self.lm_head(hidden_states)
1034
+
1035
+ loss = None
1036
+ if labels is not None:
1037
+ # Shift so that tokens < n predict n
1038
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1039
+ shift_labels = labels[..., 1:].contiguous()
1040
+ # Flatten the tokens
1041
+ loss_fct = CrossEntropyLoss()
1042
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1043
+
1044
+ if not return_dict:
1045
+ output = (lm_logits,) + transformer_outputs[1:]
1046
+ return ((loss,) + output) if loss is not None else output
1047
+
1048
+ return CausalLMOutputWithCrossAttentions(
1049
+ loss=loss,
1050
+ logits=lm_logits,
1051
+ past_key_values=transformer_outputs.past_key_values,
1052
+ hidden_states=transformer_outputs.hidden_states,
1053
+ attentions=transformer_outputs.attentions,
1054
+ cross_attentions=transformer_outputs.cross_attentions,
1055
+ )
1056
+
1057
+ @staticmethod
1058
+ def _reorder_cache(
1059
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
1060
+ ) -> Tuple[Tuple[torch.Tensor]]:
1061
+ """
1062
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1063
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1064
+ beam_idx at every generation step.
1065
+ """
1066
+ return tuple(
1067
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
1068
+ for layer_past in past_key_values
1069
+ )
1070
+
1071
+
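The loss computation in `ImageGPTForCausalImageModeling.forward` above shifts logits and labels by one position so that the prediction at step t is scored against the token at step t+1. A self-contained sketch of that shift with made-up shapes, assuming PyTorch:

```python
import torch
from torch.nn import CrossEntropyLoss

vocab_size = 10
lm_logits = torch.randn(2, 5, vocab_size)           # (batch_size, sequence_length, vocab_size)
labels = torch.randint(0, vocab_size, (2, 5))       # labels == input_ids for causal modeling

shift_logits = lm_logits[..., :-1, :].contiguous()  # predictions for positions 0 .. n-2
shift_labels = labels[..., 1:].contiguous()         # targets are the next tokens, 1 .. n-1

loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())
```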
1072
+ @add_start_docstrings(
1073
+ """
1074
+ The ImageGPT Model transformer with an image classification head on top (linear layer).
1075
+ [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification.
1076
+ """,
1077
+ IMAGEGPT_START_DOCSTRING,
1078
+ )
1079
+ class ImageGPTForImageClassification(ImageGPTPreTrainedModel):
1080
+ def __init__(self, config: ImageGPTConfig):
1081
+ super().__init__(config)
1082
+ self.num_labels = config.num_labels
1083
+ self.transformer = ImageGPTModel(config)
1084
+ self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)
1085
+
1086
+ # Initialize weights and apply final processing
1087
+ self.post_init()
1088
+
1089
+ @add_start_docstrings_to_model_forward(IMAGEGPT_INPUTS_DOCSTRING)
1090
+ @replace_return_docstrings(output_type=SequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC)
1091
+ def forward(
1092
+ self,
1093
+ input_ids: Optional[torch.Tensor] = None,
1094
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
1095
+ attention_mask: Optional[torch.Tensor] = None,
1096
+ token_type_ids: Optional[torch.Tensor] = None,
1097
+ position_ids: Optional[torch.Tensor] = None,
1098
+ head_mask: Optional[torch.Tensor] = None,
1099
+ inputs_embeds: Optional[torch.Tensor] = None,
1100
+ labels: Optional[torch.Tensor] = None,
1101
+ use_cache: Optional[bool] = None,
1102
+ output_attentions: Optional[bool] = None,
1103
+ output_hidden_states: Optional[bool] = None,
1104
+ return_dict: Optional[bool] = None,
1105
+ **kwargs: Any,
1106
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1107
+ r"""
1108
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1109
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1110
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1111
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1112
+
1113
+ Returns:
1114
+
1115
+ Examples:
1116
+
1117
+ ```python
1118
+ >>> from transformers import AutoImageProcessor, ImageGPTForImageClassification
1119
+ >>> from PIL import Image
1120
+ >>> import requests
1121
+
1122
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1123
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1124
+
1125
+ >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
1126
+ >>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small")
1127
+
1128
+ >>> inputs = image_processor(images=image, return_tensors="pt")
1129
+ >>> outputs = model(**inputs)
1130
+ >>> logits = outputs.logits
1131
+ ```"""
1132
+
1133
+ if "pixel_values" in kwargs:
1134
+ warnings.warn(
1135
+ "The `pixel_values` argument is deprecated and will be removed in a future version, use `input_ids`"
1136
+ " instead.",
1137
+ FutureWarning,
1138
+ )
1139
+
1140
+ if input_ids is not None:
1141
+ raise ValueError(
1142
+ "You cannot pass both `pixel_values` and `input_ids`. Please make sure to only pass `input_ids`."
1143
+ )
1144
+
1145
+ input_ids = kwargs.pop("pixel_values")
1146
+
1147
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1148
+
1149
+ transformer_outputs = self.transformer(
1150
+ input_ids,
1151
+ past_key_values=past_key_values,
1152
+ attention_mask=attention_mask,
1153
+ token_type_ids=token_type_ids,
1154
+ position_ids=position_ids,
1155
+ head_mask=head_mask,
1156
+ inputs_embeds=inputs_embeds,
1157
+ use_cache=use_cache,
1158
+ output_attentions=output_attentions,
1159
+ output_hidden_states=output_hidden_states,
1160
+ return_dict=return_dict,
1161
+ )
1162
+ hidden_states = transformer_outputs[0]
1163
+ # average-pool the hidden states along the sequence dimension
1164
+ pooled_hidden_states = hidden_states.mean(dim=1)
1165
+ # project from (batch_size, hidden_size) to (batch_size, num_labels)
1166
+ logits = self.score(pooled_hidden_states)
1167
+
1168
+ loss = None
1169
+ if labels is not None:
1170
+ if self.config.problem_type is None:
1171
+ if self.num_labels == 1:
1172
+ self.config.problem_type = "regression"
1173
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1174
+ self.config.problem_type = "single_label_classification"
1175
+ else:
1176
+ self.config.problem_type = "multi_label_classification"
1177
+
1178
+ if self.config.problem_type == "regression":
1179
+ loss_fct = MSELoss()
1180
+ if self.num_labels == 1:
1181
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1182
+ else:
1183
+ loss = loss_fct(logits, labels)
1184
+ elif self.config.problem_type == "single_label_classification":
1185
+ loss_fct = CrossEntropyLoss()
1186
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1187
+ elif self.config.problem_type == "multi_label_classification":
1188
+ loss_fct = BCEWithLogitsLoss()
1189
+ loss = loss_fct(logits, labels)
1190
+ if not return_dict:
1191
+ output = (logits,) + transformer_outputs[1:]
1192
+ return ((loss,) + output) if loss is not None else output
1193
+
1194
+ return SequenceClassifierOutputWithPast(
1195
+ loss=loss,
1196
+ logits=logits,
1197
+ past_key_values=transformer_outputs.past_key_values,
1198
+ hidden_states=transformer_outputs.hidden_states,
1199
+ attentions=transformer_outputs.attentions,
1200
+ )
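As the docstring above notes, [`ImageGPTForImageClassification`] average-pools the hidden states over the sequence dimension before applying the linear `score` head. A minimal sketch of that pooling step with illustrative sizes, assuming PyTorch:

```python
import torch
import torch.nn as nn

batch_size, seq_len, hidden_size, num_labels = 2, 1024, 512, 3  # illustrative sizes only
hidden_states = torch.randn(batch_size, seq_len, hidden_size)

score = nn.Linear(hidden_size, num_labels, bias=False)  # mirrors the `self.score` head above
pooled = hidden_states.mean(dim=1)                      # average-pool over the sequence dimension
logits = score(pooled)                                  # (batch_size, num_labels)
print(logits.shape)
```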
llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__init__.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ # rely on isort to merge the imports
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_informer": [
22
+ "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
23
+ "InformerConfig",
24
+ ],
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_informer"] = [
34
+ "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "InformerForPrediction",
36
+ "InformerModel",
37
+ "InformerPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
43
+
44
+ try:
45
+ if not is_torch_available():
46
+ raise OptionalDependencyNotAvailable()
47
+ except OptionalDependencyNotAvailable:
48
+ pass
49
+ else:
50
+ from .modeling_informer import (
51
+ INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
52
+ InformerForPrediction,
53
+ InformerModel,
54
+ InformerPreTrainedModel,
55
+ )
56
+
57
+ else:
58
+ import sys
59
+
60
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
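This `__init__.py` follows the library's lazy-import pattern: public names are declared in `_import_structure`, torch-only symbols are registered behind an availability check, and the module object is replaced by a `_LazyModule` that resolves attributes on first access. A simplified, standalone sketch of the availability-gated structure, using `importlib.util.find_spec` as a stand-in for the library's `is_torch_available` helper:

```python
import importlib.util

def torch_is_installed() -> bool:
    # Simplified stand-in for transformers' `is_torch_available()`
    return importlib.util.find_spec("torch") is not None

_import_structure = {
    "configuration_informer": ["InformerConfig"],
}

if torch_is_installed():
    # Model classes are only exposed when torch can actually be imported
    _import_structure["modeling_informer"] = ["InformerForPrediction", "InformerModel"]

print(_import_structure)
```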
llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (941 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/configuration_informer.cpython-310.pyc ADDED
Binary file (10.7 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/informer/__pycache__/modeling_informer.cpython-310.pyc ADDED
Binary file (69.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/informer/configuration_informer.py ADDED
@@ -0,0 +1,249 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Informer model configuration"""
16
+
17
+ from typing import List, Optional, Union
18
+
19
+ from ...configuration_utils import PretrainedConfig
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class InformerConfig(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of an [`InformerModel`]. It is used to instantiate an
32
+ Informer model according to the specified arguments, defining the model architecture. Instantiating a configuration
33
+ with the defaults will yield a similar configuration to that of the Informer
34
+ [huggingface/informer-tourism-monthly](https://huggingface.co/huggingface/informer-tourism-monthly) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ prediction_length (`int`):
41
+ The prediction length for the decoder. In other words, the prediction horizon of the model. This value is
42
+ typically dictated by the dataset, and we recommend setting it appropriately.
43
+ context_length (`int`, *optional*, defaults to `prediction_length`):
44
+ The context length for the encoder. If `None`, the context length will be the same as the
45
+ `prediction_length`.
46
+ distribution_output (`string`, *optional*, defaults to `"student_t"`):
47
+ The distribution emission head for the model. Could be either "student_t", "normal" or "negative_binomial".
48
+ loss (`string`, *optional*, defaults to `"nll"`):
49
+ The loss function for the model corresponding to the `distribution_output` head. For parametric
50
+ distributions it is the negative log likelihood (nll) - which currently is the only supported one.
51
+ input_size (`int`, *optional*, defaults to 1):
52
+ The size of the target variable which by default is 1 for univariate targets. Would be > 1 in case of
53
+ multivariate targets.
54
+ scaling (`string` or `bool`, *optional*, defaults to `"mean"`):
55
+ Whether to scale the input targets via "mean" scaler, "std" scaler or no scaler if `None`. If `True`, the
56
+ scaler is set to "mean".
57
+ lags_sequence (`list[int]`, *optional*, defaults to `[1, 2, 3, 4, 5, 6, 7]`):
58
+ The lags of the input time series as covariates often dictated by the frequency of the data. Default is
59
+ `[1, 2, 3, 4, 5, 6, 7]`, but we recommend changing it appropriately for the dataset.
60
+ num_time_features (`int`, *optional*, defaults to 0):
61
+ The number of time features in the input time series.
62
+ num_dynamic_real_features (`int`, *optional*, defaults to 0):
63
+ The number of dynamic real valued features.
64
+ num_static_categorical_features (`int`, *optional*, defaults to 0):
65
+ The number of static categorical features.
66
+ num_static_real_features (`int`, *optional*, defaults to 0):
67
+ The number of static real valued features.
68
+ cardinality (`list[int]`, *optional*):
69
+ The cardinality (number of different values) for each of the static categorical features. Should be a list
70
+ of integers, having the same length as `num_static_categorical_features`. Cannot be `None` if
71
+ `num_static_categorical_features` is > 0.
72
+ embedding_dimension (`list[int]`, *optional*):
73
+ The dimension of the embedding for each of the static categorical features. Should be a list of integers,
74
+ having the same length as `num_static_categorical_features`. Cannot be `None` if
75
+ `num_static_categorical_features` is > 0.
76
+ d_model (`int`, *optional*, defaults to 64):
77
+ Dimensionality of the transformer layers.
78
+ encoder_layers (`int`, *optional*, defaults to 2):
79
+ Number of encoder layers.
80
+ decoder_layers (`int`, *optional*, defaults to 2):
81
+ Number of decoder layers.
82
+ encoder_attention_heads (`int`, *optional*, defaults to 2):
83
+ Number of attention heads for each attention layer in the Transformer encoder.
84
+ decoder_attention_heads (`int`, *optional*, defaults to 2):
85
+ Number of attention heads for each attention layer in the Transformer decoder.
86
+ encoder_ffn_dim (`int`, *optional*, defaults to 32):
87
+ Dimension of the "intermediate" (often named feed-forward) layer in encoder.
88
+ decoder_ffn_dim (`int`, *optional*, defaults to 32):
89
+ Dimension of the "intermediate" (often named feed-forward) layer in decoder.
90
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
91
+ The non-linear activation function (function or string) in the encoder and decoder. If string, `"gelu"` and
92
+ `"relu"` are supported.
93
+ dropout (`float`, *optional*, defaults to 0.05):
94
+ The dropout probability for all fully connected layers in the encoder and decoder.
95
+ encoder_layerdrop (`float`, *optional*, defaults to 0.1):
96
+ The dropout probability for the attention and fully connected layers for each encoder layer.
97
+ decoder_layerdrop (`float`, *optional*, defaults to 0.1):
98
+ The dropout probability for the attention and fully connected layers for each decoder layer.
99
+ attention_dropout (`float`, *optional*, defaults to 0.1):
100
+ The dropout probability for the attention probabilities.
101
+ activation_dropout (`float`, *optional*, defaults to 0.1):
102
+ The dropout probability used between the two layers of the feed-forward networks.
103
+ num_parallel_samples (`int`, *optional*, defaults to 100):
104
+ The number of samples to generate in parallel for each time step of inference.
105
+ init_std (`float`, *optional*, defaults to 0.02):
106
+ The standard deviation of the truncated normal weight initialization distribution.
107
+ use_cache (`bool`, *optional*, defaults to `True`):
108
+ Whether to use the past key/values attentions (if applicable to the model) to speed up decoding.
109
+ attention_type (`str`, *optional*, defaults to "prob"):
110
+ Attention used in encoder. This can be set to "prob" (Informer's ProbAttention) or "full" (vanilla
111
+ transformer's canonical self-attention).
112
+ sampling_factor (`int`, *optional*, defaults to 5):
113
+ ProbSparse sampling factor (only takes effect when `attention_type="prob"`). It is used to control the
114
+ reduced query matrix (Q_reduce) input length.
115
+ distil (`bool`, *optional*, defaults to `True`):
116
+ Whether to use distilling in the encoder.
117
+
118
+ Example:
119
+
120
+ ```python
121
+ >>> from transformers import InformerConfig, InformerModel
122
+
123
+ >>> # Initializing an Informer configuration with 12 time steps for prediction
124
+ >>> configuration = InformerConfig(prediction_length=12)
125
+
126
+ >>> # Randomly initializing a model (with random weights) from the configuration
127
+ >>> model = InformerModel(configuration)
128
+
129
+ >>> # Accessing the model configuration
130
+ >>> configuration = model.config
131
+ ```"""
132
+
133
+ model_type = "informer"
134
+ attribute_map = {
135
+ "hidden_size": "d_model",
136
+ "num_attention_heads": "encoder_attention_heads",
137
+ "num_hidden_layers": "encoder_layers",
138
+ }
139
+
140
+ def __init__(
141
+ self,
142
+ prediction_length: Optional[int] = None,
143
+ context_length: Optional[int] = None,
144
+ distribution_output: str = "student_t",
145
+ loss: str = "nll",
146
+ input_size: int = 1,
147
+ lags_sequence: List[int] = None,
148
+ scaling: Optional[Union[str, bool]] = "mean",
149
+ num_dynamic_real_features: int = 0,
150
+ num_static_real_features: int = 0,
151
+ num_static_categorical_features: int = 0,
152
+ num_time_features: int = 0,
153
+ cardinality: Optional[List[int]] = None,
154
+ embedding_dimension: Optional[List[int]] = None,
155
+ d_model: int = 64,
156
+ encoder_ffn_dim: int = 32,
157
+ decoder_ffn_dim: int = 32,
158
+ encoder_attention_heads: int = 2,
159
+ decoder_attention_heads: int = 2,
160
+ encoder_layers: int = 2,
161
+ decoder_layers: int = 2,
162
+ is_encoder_decoder: bool = True,
163
+ activation_function: str = "gelu",
164
+ dropout: float = 0.05,
165
+ encoder_layerdrop: float = 0.1,
166
+ decoder_layerdrop: float = 0.1,
167
+ attention_dropout: float = 0.1,
168
+ activation_dropout: float = 0.1,
169
+ num_parallel_samples: int = 100,
170
+ init_std: float = 0.02,
171
+ use_cache=True,
172
+ # Informer arguments
173
+ attention_type: str = "prob",
174
+ sampling_factor: int = 5,
175
+ distil: bool = True,
176
+ **kwargs,
177
+ ):
178
+ # time series specific configuration
179
+ self.prediction_length = prediction_length
180
+ self.context_length = context_length or prediction_length
181
+ self.distribution_output = distribution_output
182
+ self.loss = loss
183
+ self.input_size = input_size
184
+ self.num_time_features = num_time_features
185
+ self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
186
+ self.scaling = scaling
187
+ self.num_dynamic_real_features = num_dynamic_real_features
188
+ self.num_static_real_features = num_static_real_features
189
+ self.num_static_categorical_features = num_static_categorical_features
190
+
191
+ # set cardinality
192
+ if cardinality and num_static_categorical_features > 0:
193
+ if len(cardinality) != num_static_categorical_features:
194
+ raise ValueError(
195
+ "The cardinality should be a list of the same length as `num_static_categorical_features`"
196
+ )
197
+ self.cardinality = cardinality
198
+ else:
199
+ self.cardinality = [0]
200
+
201
+ # set embedding_dimension
202
+ if embedding_dimension and num_static_categorical_features > 0:
203
+ if len(embedding_dimension) != num_static_categorical_features:
204
+ raise ValueError(
205
+ "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
206
+ )
207
+ self.embedding_dimension = embedding_dimension
208
+ else:
209
+ self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
210
+
211
+ self.num_parallel_samples = num_parallel_samples
212
+
213
+ # Transformer architecture configuration
214
+ self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
215
+ self.d_model = d_model
216
+ self.encoder_attention_heads = encoder_attention_heads
217
+ self.decoder_attention_heads = decoder_attention_heads
218
+ self.encoder_ffn_dim = encoder_ffn_dim
219
+ self.decoder_ffn_dim = decoder_ffn_dim
220
+ self.encoder_layers = encoder_layers
221
+ self.decoder_layers = decoder_layers
222
+
223
+ self.dropout = dropout
224
+ self.attention_dropout = attention_dropout
225
+ self.activation_dropout = activation_dropout
226
+ self.encoder_layerdrop = encoder_layerdrop
227
+ self.decoder_layerdrop = decoder_layerdrop
228
+
229
+ self.activation_function = activation_function
230
+ self.init_std = init_std
231
+
232
+ self.use_cache = use_cache
233
+
234
+ # Informer
235
+ self.attention_type = attention_type
236
+ self.sampling_factor = sampling_factor
237
+ self.distil = distil
238
+
239
+ super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
240
+
241
+ @property
242
+ def _number_of_features(self) -> int:
243
+ return (
244
+ sum(self.embedding_dimension)
245
+ + self.num_dynamic_real_features
246
+ + self.num_time_features
247
+ + self.num_static_real_features
248
+ + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
249
+ )
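When `embedding_dimension` is not supplied, `InformerConfig` derives one embedding size per static categorical feature from its cardinality with the `min(50, (cat + 1) // 2)` rule used above. A quick sketch of that heuristic with made-up cardinalities:

```python
# Default embedding sizes derived from static categorical cardinalities
cardinality = [3, 10, 365, 10000]  # illustrative values
embedding_dimension = [min(50, (cat + 1) // 2) for cat in cardinality]
print(embedding_dimension)  # [2, 5, 50, 50]
```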
llmeval-env/lib/python3.10/site-packages/transformers/models/informer/modeling_informer.py ADDED
The diff for this file is too large to render.
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__init__.py ADDED
@@ -0,0 +1,84 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
22
+ }
23
+
24
+ try:
25
+ if not is_torch_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["modeling_longt5"] = [
31
+ "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
32
+ "LongT5EncoderModel",
33
+ "LongT5ForConditionalGeneration",
34
+ "LongT5Model",
35
+ "LongT5PreTrainedModel",
36
+ ]
37
+
38
+ try:
39
+ if not is_flax_available():
40
+ raise OptionalDependencyNotAvailable()
41
+ except OptionalDependencyNotAvailable:
42
+ pass
43
+ else:
44
+ _import_structure["modeling_flax_longt5"] = [
45
+ "FlaxLongT5ForConditionalGeneration",
46
+ "FlaxLongT5Model",
47
+ "FlaxLongT5PreTrainedModel",
48
+ ]
49
+
50
+
51
+ if TYPE_CHECKING:
52
+ from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig
53
+
54
+ try:
55
+ if not is_torch_available():
56
+ raise OptionalDependencyNotAvailable()
57
+ except OptionalDependencyNotAvailable:
58
+ pass
59
+ else:
60
+ from .modeling_longt5 import (
61
+ LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
62
+ LongT5EncoderModel,
63
+ LongT5ForConditionalGeneration,
64
+ LongT5Model,
65
+ LongT5PreTrainedModel,
66
+ )
67
+
68
+ try:
69
+ if not is_flax_available():
70
+ raise OptionalDependencyNotAvailable()
71
+ except OptionalDependencyNotAvailable:
72
+ pass
73
+ else:
74
+ from .modeling_flax_longt5 import (
75
+ FlaxLongT5ForConditionalGeneration,
76
+ FlaxLongT5Model,
77
+ FlaxLongT5PreTrainedModel,
78
+ )
79
+
80
+
81
+ else:
82
+ import sys
83
+
84
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.26 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/configuration_longt5.cpython-310.pyc ADDED
Binary file (6.92 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/convert_longt5x_checkpoint_to_flax.cpython-310.pyc ADDED
Binary file (5.09 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_flax_longt5.cpython-310.pyc ADDED
Binary file (60 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/__pycache__/modeling_longt5.cpython-310.pyc ADDED
Binary file (58.3 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/configuration_longt5.py ADDED
@@ -0,0 +1,174 @@
1
+ # coding=utf-8
2
+ # Copyright 2022, The LongT5 Authors and HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ LongT5 model configuration"""
16
+ from typing import Mapping
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...onnx import OnnxSeq2SeqConfigWithPast
20
+ from ...utils import logging
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ from ..deprecated._archive_maps import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
27
+
28
+
29
+ class LongT5Config(PretrainedConfig):
30
+ r"""
31
+ This is the configuration class to store the configuration of a [`LongT5Model`] or a [`FlaxLongT5Model`]. It is
32
+ used to instantiate a LongT5 model according to the specified arguments, defining the model architecture.
33
+ Instantiating a configuration with the defaults will yield a similar configuration to that of the LongT5
34
+ [google/long-t5-local-base](https://huggingface.co/google/long-t5-local-base) architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Arguments:
40
+ vocab_size (`int`, *optional*, defaults to 32128):
41
+ Vocabulary size of the LongT5 model. Defines the number of different tokens that can be represented by the
42
+ `input_ids` passed when calling [`LongT5Model`].
43
+ d_model (`int`, *optional*, defaults to 512):
44
+ Size of the encoder layers and the pooler layer.
45
+ d_kv (`int`, *optional*, defaults to 64):
46
+ Size of the key, query, value projections per attention head. `d_kv` has to be equal to `d_model //
47
+ num_heads`.
48
+ d_ff (`int`, *optional*, defaults to 2048):
49
+ Size of the intermediate feed forward layer in each `LongT5Block`.
50
+ num_layers (`int`, *optional*, defaults to 6):
51
+ Number of hidden layers in the Transformer encoder.
52
+ num_decoder_layers (`int`, *optional*):
53
+ Number of hidden layers in the Transformer decoder. Will use the same value as `num_layers` if not set.
54
+ num_heads (`int`, *optional*, defaults to 8):
55
+ Number of attention heads for each attention layer in the Transformer encoder.
56
+ local_radius (`int`, *optional*, defaults to 127):
57
+ Number of tokens to the left/right for each token to locally self-attend in a local attention mechanism.
58
+ global_block_size (`int`, *optional*, defaults to 16):
59
+ Length of blocks an input sequence is divided into for a global token representation. Used only for
60
+ `encoder_attention_type = "transient-global"`.
61
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
62
+ The number of buckets to use for each attention layer.
63
+ relative_attention_max_distance (`int`, *optional*, defaults to 128):
64
+ The maximum distance of the longer sequences for the bucket separation.
65
+ dropout_rate (`float`, *optional*, defaults to 0.1):
66
+ The ratio for all dropout layers.
67
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-6):
68
+ The epsilon used by the layer normalization layers.
69
+ initializer_factor (`float`, *optional*, defaults to 1):
70
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
71
+ testing).
72
+ feed_forward_proj (`string`, *optional*, defaults to `"relu"`):
73
+ Type of feed forward layer to be used. Should be one of `"relu"` or `"gated-gelu"`. LongT5v1.1 uses the
74
+ `"gated-gelu"` feed forward projection. Original LongT5 implementation uses `"gated-gelu"`.
75
+ encoder_attention_type (`string`, *optional*, defaults to `"local"`):
76
+ Type of encoder attention to be used. Should be one of `"local"` or `"transient-global"`, which are
77
+ supported by LongT5 implementation.
78
+ use_cache (`bool`, *optional*, defaults to `True`):
79
+ Whether or not the model should return the last key/values attentions (not used by all models).
80
+ """
81
+
82
+ model_type = "longt5"
83
+ keys_to_ignore_at_inference = ["past_key_values"]
84
+ attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
85
+
86
+ def __init__(
87
+ self,
88
+ vocab_size=32128,
89
+ d_model=512,
90
+ d_kv=64,
91
+ d_ff=2048,
92
+ num_layers=6,
93
+ num_decoder_layers=None,
94
+ num_heads=8,
95
+ local_radius=127,
96
+ global_block_size=16,
97
+ relative_attention_num_buckets=32,
98
+ relative_attention_max_distance=128,
99
+ dropout_rate=0.1,
100
+ layer_norm_epsilon=1e-6,
101
+ initializer_factor=1.0,
102
+ feed_forward_proj="relu",
103
+ is_encoder_decoder=True,
104
+ encoder_attention_type="local",
105
+ use_cache=True,
106
+ pad_token_id=0,
107
+ eos_token_id=1,
108
+ **kwargs,
109
+ ):
110
+ self.vocab_size = vocab_size
111
+ self.d_model = d_model
112
+ self.d_kv = d_kv
113
+ self.d_ff = d_ff
114
+ self.num_layers = num_layers
115
+ # default = symmetry
116
+ self.num_decoder_layers = num_decoder_layers if num_decoder_layers is not None else self.num_layers
117
+ self.num_heads = num_heads
118
+ self.local_radius = local_radius
119
+ self.global_block_size = global_block_size
120
+ self.relative_attention_num_buckets = relative_attention_num_buckets
121
+ self.relative_attention_max_distance = relative_attention_max_distance
122
+ self.dropout_rate = dropout_rate
123
+ self.layer_norm_epsilon = layer_norm_epsilon
124
+ self.initializer_factor = initializer_factor
125
+ self.feed_forward_proj = feed_forward_proj
126
+ self.encoder_attention_type = encoder_attention_type
127
+ self.use_cache = use_cache
128
+
129
+ act_info = self.feed_forward_proj.split("-")
130
+ self.dense_act_fn = act_info[-1]
131
+ self.is_gated_act = act_info[0] == "gated"
132
+
133
+ if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
134
+ raise ValueError(
135
+ f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
136
+ "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
137
+ "'gated-gelu' or 'relu'"
138
+ )
139
+
140
+ # for backwards compatibility
141
+ if feed_forward_proj == "gated-gelu":
142
+ self.dense_act_fn = "gelu_new"
143
+
144
+ super().__init__(
145
+ pad_token_id=pad_token_id,
146
+ eos_token_id=eos_token_id,
147
+ is_encoder_decoder=is_encoder_decoder,
148
+ **kwargs,
149
+ )
150
+
151
+
152
+ class LongT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
153
+ @property
154
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
155
+ common_inputs = {
156
+ "input_ids": {0: "batch", 1: "encoder_sequence"},
157
+ "attention_mask": {0: "batch", 1: "encoder_sequence"},
158
+ }
159
+ if self.use_past:
160
+ common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
161
+ common_inputs["decoder_input_ids"] = {0: "batch"}
162
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
163
+ else:
164
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
165
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
166
+
167
+ if self.use_past:
168
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
169
+
170
+ return common_inputs
171
+
172
+ @property
173
+ def default_onnx_opset(self) -> int:
174
+ return 13
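The `inputs` property above only declares which axes are dynamic for ONNX export, and the axis names change once `use_past` is enabled because cached key/value states shorten the decoder inputs. A plain-dict sketch of how the mapping differs (the past-key-value entries added by `fill_with_past_key_values_` are omitted here):

```python
def dynamic_axes(use_past: bool) -> dict:
    common_inputs = {
        "input_ids": {0: "batch", 1: "encoder_sequence"},
        "attention_mask": {0: "batch", 1: "encoder_sequence"},
    }
    if use_past:
        common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
        common_inputs["decoder_input_ids"] = {0: "batch"}
        common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
    else:
        common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
        common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
    return common_inputs

print(dynamic_axes(use_past=False)["decoder_input_ids"])  # {0: 'batch', 1: 'decoder_sequence'}
```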
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/convert_longt5x_checkpoint_to_flax.py ADDED
@@ -0,0 +1,215 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Convert T5/LongT5X checkpoints from the original repository to JAX/FLAX model. This script is an extension of
17
+ 'src/transformers/models/t5/convert_t5x_checkpoint_to_flax'.
18
+ """
19
+
20
+ import argparse
21
+
22
+ from t5x import checkpoints
23
+
24
+ from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM
25
+
26
+
27
+ def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
28
+ config = AutoConfig.from_pretrained(config_name)
29
+ flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
30
+ t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
31
+
32
+ split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]
33
+
34
+ if config.model_type == "t5":
35
+ encoder_attn_name = "SelfAttention"
36
+ elif config.model_type == "longt5" and config.encoder_attention_type == "local":
37
+ encoder_attn_name = "LocalSelfAttention"
38
+ elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
39
+ encoder_attn_name = "TransientGlobalSelfAttention"
40
+ else:
41
+ raise ValueError(
42
+ "Given config is expected to have `model_type='t5'`, or `model_type='longt5'` with `encoder_attention_type`"
43
+ " attribute with a value from ['local', 'transient-global']."
44
+ )
45
+
46
+     # Encoder
+     for layer_index in range(config.num_layers):
+         layer_name = f"layers_{str(layer_index)}"
+
+         # Self-Attention
+         t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
+         t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
+         t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
+         t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
+
+         # Global input layer norm
+         if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+             t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
+
+         # Layer Normalization
+         t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
+
+         if split_mlp_wi:
+             t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+             t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+         else:
+             t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+         t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+         # Layer Normalization
+         t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+         # Assigning
+         flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
+         flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
+         flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
+         flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
+         flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value
+
+         flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm
+
+         # Global input layer norm
+         if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+             flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
+                 "weight"
+             ] = t5x_global_layer_norm
+
+         if split_mlp_wi:
+             flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+             flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+         else:
+             flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+         flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+         flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm
+
+         flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
+
+     # Only for layer 0:
+     t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
+     flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
+         "embedding"
+     ] = t5x_encoder_rel_embedding
+
+     # Side/global relative position_bias + layer norm
+     if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
+         t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
+         flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
+             "embedding"
+         ] = t5x_encoder_global_rel_embedding
+
+     # Assigning
+     t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
+     flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm
+
+     # Decoder
+     for layer_index in range(config.num_layers):
+         layer_name = f"layers_{str(layer_index)}"
+
+         # Self-Attention
+         t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
+         t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
+         t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
+         t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
+
+         # Layer Normalization
+         t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
+             "scale"
+         ]
+
+         # Encoder-Decoder-Attention
+         t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
+         t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
+         t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
+         t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
+         t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]
+
+         # Layer Normalization
+         t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
+
+         # MLP
+         if split_mlp_wi:
+             t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
+             t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
+         else:
+             t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
+
+         t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
+
+         # Layer Normalization
+         t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
+
+         # Assigning
+         flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
+         flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
+         flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
+         flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
+         flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value
+
+         flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm
+
+         flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
+         flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
+         flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
+         flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value
+
+         flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm
+
+         if split_mlp_wi:
+             flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
+             flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
+         else:
+             flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi
+
+         flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
+
+         flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm
+
+         flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
+
+     # Decoder Normalization
+     t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
+     flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm
+
+     # Only for layer 0:
+     t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
+     flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
+         "embedding"
+     ] = t5x_decoder_rel_embedding
+
+     # Token Embeddings
+     t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
+     flax_model.params["shared"]["embedding"] = t5x_token_embeddings
+
+     # LM Head (only in v1.1 and LongT5 checkpoints)
+     if "logits_dense" in t5x_model["target"]["decoder"]:
+         flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]
+
+     flax_model.save_pretrained(flax_dump_folder_path)
+     print("T5X Model was successfully converted!")
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     # Required parameters
+     parser.add_argument(
+         "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
+     )
+     parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
+     parser.add_argument(
+         "--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
+     )
+     args = parser.parse_args()
+     convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
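For context, a minimal usage sketch of the conversion script added above (not part of the diff): the script file name, the Hub config name google/long-t5-local-base, and the dump folder /tmp/flax_longt5 are illustrative assumptions, and the snippet only shows how a converted checkpoint could be loaded back for generation with the auto classes used by the script.

# Minimal usage sketch; paths, script name, and config name are assumptions for illustration.
#
# Step 1 (shell): run the conversion script, e.g.
#   python convert_longt5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path /tmp/flax_longt5
#
# Step 2 (Python): load the converted Flax model from the dump folder.
from transformers import AutoTokenizer, FlaxAutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("google/long-t5-local-base")
model = FlaxAutoModelForSeq2SeqLM.from_pretrained("/tmp/flax_longt5")

inputs = tokenizer("summarize: The quick brown fox jumps over the lazy dog.", return_tensors="np")
outputs = model.generate(inputs["input_ids"], attention_mask=inputs["attention_mask"], max_length=32)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))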
llmeval-env/lib/python3.10/site-packages/transformers/models/longt5/modeling_flax_longt5.py ADDED
The diff for this file is too large to render. See raw diff