applied-ai-018 committed
Commit 13c6267 · verified · 1 Parent(s): 03a01d6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/configuration_utils.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/debug_utils.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_tf_utils.cpython-310.pyc +0 -0
  7. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hyperparameter_search.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/keras_callbacks.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_attn_mask_utils.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_outputs.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_utils.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_pytorch_utils.cpython-310.pyc +0 -0
  16. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-310.pyc +0 -0
  17. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_utils.cpython-310.pyc +0 -0
  18. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc +0 -0
  19. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/processing_utils.cpython-310.pyc +0 -0
  20. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc +0 -0
  21. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/testing_utils.cpython-310.pyc +0 -0
  22. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer.cpython-310.pyc +0 -0
  23. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_seq2seq.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_utils.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_tf.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/transformers/generation/__init__.py +306 -0
  28. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc +0 -0
  32. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc +0 -0
  33. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc +0 -0
  34. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_constraints.py +521 -0
  42. env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_search.py +1005 -0
  43. env-llmeval/lib/python3.10/site-packages/transformers/generation/candidate_generator.py +410 -0
  44. env-llmeval/lib/python3.10/site-packages/transformers/generation/configuration_utils.py +1092 -0
  45. env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py +457 -0
  46. env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_utils.py +1019 -0
  47. env-llmeval/lib/python3.10/site-packages/transformers/generation/logits_process.py +0 -0
  48. env-llmeval/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py +157 -0
  49. env-llmeval/lib/python3.10/site-packages/transformers/generation/streamers.py +227 -0
  50. env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py +591 -0
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/cache_utils.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/configuration_utils.cpython-310.pyc ADDED
Binary file (44.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/convert_pytorch_checkpoint_to_tf2.cpython-310.pyc ADDED
Binary file (9.84 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/debug_utils.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/feature_extraction_utils.cpython-310.pyc ADDED
Binary file (23.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/generation_tf_utils.cpython-310.pyc ADDED
Binary file (657 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/hyperparameter_search.cpython-310.pyc ADDED
Binary file (5.05 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/image_processing_utils.cpython-310.pyc ADDED
Binary file (29 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/keras_callbacks.cpython-310.pyc ADDED
Binary file (16.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_attn_mask_utils.cpython-310.pyc ADDED
Binary file (12.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_outputs.cpython-310.pyc ADDED
Binary file (41.1 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_pytorch_utils.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_flax_utils.cpython-310.pyc ADDED
Binary file (40.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_outputs.cpython-310.pyc ADDED
Binary file (110 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_pytorch_utils.cpython-310.pyc ADDED
Binary file (15.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_tf_utils.cpython-310.pyc ADDED
Binary file (105 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/modeling_utils.cpython-310.pyc ADDED
Binary file (146 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/optimization.cpython-310.pyc ADDED
Binary file (25.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/processing_utils.cpython-310.pyc ADDED
Binary file (15.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/safetensors_conversion.cpython-310.pyc ADDED
Binary file (3.41 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/testing_utils.cpython-310.pyc ADDED
Binary file (74.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer.cpython-310.pyc ADDED
Binary file (116 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_seq2seq.cpython-310.pyc ADDED
Binary file (11.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/trainer_utils.cpython-310.pyc ADDED
Binary file (24.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/__pycache__/training_args_tf.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (748 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__init__.py ADDED
@@ -0,0 +1,306 @@
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available


_import_structure = {
    "configuration_utils": ["GenerationConfig", "GenerationMode"],
    "streamers": ["TextIteratorStreamer", "TextStreamer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["beam_constraints"] = [
        "Constraint",
        "ConstraintListState",
        "DisjunctiveConstraint",
        "PhrasalConstraint",
    ]
    _import_structure["beam_search"] = [
        "BeamHypotheses",
        "BeamScorer",
        "BeamSearchScorer",
        "ConstrainedBeamSearchScorer",
    ]
    _import_structure["candidate_generator"] = [
        "AssistedCandidateGenerator",
        "CandidateGenerator",
        "PromptLookupCandidateGenerator",
    ]
    _import_structure["logits_process"] = [
        "AlternatingCodebooksLogitsProcessor",
        "ClassifierFreeGuidanceLogitsProcessor",
        "EncoderNoRepeatNGramLogitsProcessor",
        "EncoderRepetitionPenaltyLogitsProcessor",
        "EpsilonLogitsWarper",
        "EtaLogitsWarper",
        "ExponentialDecayLengthPenalty",
        "ForcedBOSTokenLogitsProcessor",
        "ForcedEOSTokenLogitsProcessor",
        "ForceTokensLogitsProcessor",
        "HammingDiversityLogitsProcessor",
        "InfNanRemoveLogitsProcessor",
        "LogitNormalization",
        "LogitsProcessor",
        "LogitsProcessorList",
        "LogitsWarper",
        "MinLengthLogitsProcessor",
        "MinNewTokensLengthLogitsProcessor",
        "NoBadWordsLogitsProcessor",
        "NoRepeatNGramLogitsProcessor",
        "PrefixConstrainedLogitsProcessor",
        "RepetitionPenaltyLogitsProcessor",
        "SequenceBiasLogitsProcessor",
        "SuppressTokensLogitsProcessor",
        "SuppressTokensAtBeginLogitsProcessor",
        "TemperatureLogitsWarper",
        "TopKLogitsWarper",
        "TopPLogitsWarper",
        "TypicalLogitsWarper",
        "UnbatchedClassifierFreeGuidanceLogitsProcessor",
        "WhisperTimeStampLogitsProcessor",
    ]
    _import_structure["stopping_criteria"] = [
        "MaxNewTokensCriteria",
        "MaxLengthCriteria",
        "MaxTimeCriteria",
        "StoppingCriteria",
        "StoppingCriteriaList",
        "validate_stopping_criteria",
    ]
    _import_structure["utils"] = [
        "GenerationMixin",
        "GreedySearchEncoderDecoderOutput",
        "GreedySearchDecoderOnlyOutput",
        "SampleEncoderDecoderOutput",
        "SampleDecoderOnlyOutput",
        "BeamSearchEncoderDecoderOutput",
        "BeamSearchDecoderOnlyOutput",
        "BeamSampleEncoderDecoderOutput",
        "BeamSampleDecoderOnlyOutput",
        "ContrastiveSearchEncoderDecoderOutput",
        "ContrastiveSearchDecoderOnlyOutput",
        "GenerateBeamDecoderOnlyOutput",
        "GenerateBeamEncoderDecoderOutput",
        "GenerateDecoderOnlyOutput",
        "GenerateEncoderDecoderOutput",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tf_logits_process"] = [
        "TFForcedBOSTokenLogitsProcessor",
        "TFForcedEOSTokenLogitsProcessor",
        "TFForceTokensLogitsProcessor",
        "TFLogitsProcessor",
        "TFLogitsProcessorList",
        "TFLogitsWarper",
        "TFMinLengthLogitsProcessor",
        "TFNoBadWordsLogitsProcessor",
        "TFNoRepeatNGramLogitsProcessor",
        "TFRepetitionPenaltyLogitsProcessor",
        "TFSuppressTokensAtBeginLogitsProcessor",
        "TFSuppressTokensLogitsProcessor",
        "TFTemperatureLogitsWarper",
        "TFTopKLogitsWarper",
        "TFTopPLogitsWarper",
    ]
    _import_structure["tf_utils"] = [
        "TFGenerationMixin",
        "TFGreedySearchDecoderOnlyOutput",
        "TFGreedySearchEncoderDecoderOutput",
        "TFSampleEncoderDecoderOutput",
        "TFSampleDecoderOnlyOutput",
        "TFBeamSearchEncoderDecoderOutput",
        "TFBeamSearchDecoderOnlyOutput",
        "TFBeamSampleEncoderDecoderOutput",
        "TFBeamSampleDecoderOnlyOutput",
        "TFContrastiveSearchEncoderDecoderOutput",
        "TFContrastiveSearchDecoderOnlyOutput",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["flax_logits_process"] = [
        "FlaxForcedBOSTokenLogitsProcessor",
        "FlaxForcedEOSTokenLogitsProcessor",
        "FlaxForceTokensLogitsProcessor",
        "FlaxLogitsProcessor",
        "FlaxLogitsProcessorList",
        "FlaxLogitsWarper",
        "FlaxMinLengthLogitsProcessor",
        "FlaxSuppressTokensAtBeginLogitsProcessor",
        "FlaxSuppressTokensLogitsProcessor",
        "FlaxTemperatureLogitsWarper",
        "FlaxTopKLogitsWarper",
        "FlaxTopPLogitsWarper",
        "FlaxWhisperTimeStampLogitsProcessor",
    ]
    _import_structure["flax_utils"] = [
        "FlaxGenerationMixin",
        "FlaxGreedySearchOutput",
        "FlaxSampleOutput",
        "FlaxBeamSearchOutput",
    ]

if TYPE_CHECKING:
    from .configuration_utils import GenerationConfig, GenerationMode
    from .streamers import TextIteratorStreamer, TextStreamer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint
        from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
        from .candidate_generator import AssistedCandidateGenerator, CandidateGenerator, PromptLookupCandidateGenerator
        from .logits_process import (
            AlternatingCodebooksLogitsProcessor,
            ClassifierFreeGuidanceLogitsProcessor,
            EncoderNoRepeatNGramLogitsProcessor,
            EncoderRepetitionPenaltyLogitsProcessor,
            EpsilonLogitsWarper,
            EtaLogitsWarper,
            ExponentialDecayLengthPenalty,
            ForcedBOSTokenLogitsProcessor,
            ForcedEOSTokenLogitsProcessor,
            ForceTokensLogitsProcessor,
            HammingDiversityLogitsProcessor,
            InfNanRemoveLogitsProcessor,
            LogitNormalization,
            LogitsProcessor,
            LogitsProcessorList,
            LogitsWarper,
            MinLengthLogitsProcessor,
            MinNewTokensLengthLogitsProcessor,
            NoBadWordsLogitsProcessor,
            NoRepeatNGramLogitsProcessor,
            PrefixConstrainedLogitsProcessor,
            RepetitionPenaltyLogitsProcessor,
            SequenceBiasLogitsProcessor,
            SuppressTokensAtBeginLogitsProcessor,
            SuppressTokensLogitsProcessor,
            TemperatureLogitsWarper,
            TopKLogitsWarper,
            TopPLogitsWarper,
            TypicalLogitsWarper,
            UnbatchedClassifierFreeGuidanceLogitsProcessor,
            WhisperTimeStampLogitsProcessor,
        )
        from .stopping_criteria import (
            MaxLengthCriteria,
            MaxNewTokensCriteria,
            MaxTimeCriteria,
            StoppingCriteria,
            StoppingCriteriaList,
            validate_stopping_criteria,
        )
        from .utils import (
            BeamSampleDecoderOnlyOutput,
            BeamSampleEncoderDecoderOutput,
            BeamSearchDecoderOnlyOutput,
            BeamSearchEncoderDecoderOutput,
            ContrastiveSearchDecoderOnlyOutput,
            ContrastiveSearchEncoderDecoderOutput,
            GenerateBeamDecoderOnlyOutput,
            GenerateBeamEncoderDecoderOutput,
            GenerateDecoderOnlyOutput,
            GenerateEncoderDecoderOutput,
            GenerationMixin,
            GreedySearchDecoderOnlyOutput,
            GreedySearchEncoderDecoderOutput,
            SampleDecoderOnlyOutput,
            SampleEncoderDecoderOutput,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tf_logits_process import (
            TFForcedBOSTokenLogitsProcessor,
            TFForcedEOSTokenLogitsProcessor,
            TFForceTokensLogitsProcessor,
            TFLogitsProcessor,
            TFLogitsProcessorList,
            TFLogitsWarper,
            TFMinLengthLogitsProcessor,
            TFNoBadWordsLogitsProcessor,
            TFNoRepeatNGramLogitsProcessor,
            TFRepetitionPenaltyLogitsProcessor,
            TFSuppressTokensAtBeginLogitsProcessor,
            TFSuppressTokensLogitsProcessor,
            TFTemperatureLogitsWarper,
            TFTopKLogitsWarper,
            TFTopPLogitsWarper,
        )
        from .tf_utils import (
            TFBeamSampleDecoderOnlyOutput,
            TFBeamSampleEncoderDecoderOutput,
            TFBeamSearchDecoderOnlyOutput,
            TFBeamSearchEncoderDecoderOutput,
            TFContrastiveSearchDecoderOnlyOutput,
            TFContrastiveSearchEncoderDecoderOutput,
            TFGenerationMixin,
            TFGreedySearchDecoderOnlyOutput,
            TFGreedySearchEncoderDecoderOutput,
            TFSampleDecoderOnlyOutput,
            TFSampleEncoderDecoderOutput,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .flax_logits_process import (
            FlaxForcedBOSTokenLogitsProcessor,
            FlaxForcedEOSTokenLogitsProcessor,
            FlaxForceTokensLogitsProcessor,
            FlaxLogitsProcessor,
            FlaxLogitsProcessorList,
            FlaxLogitsWarper,
            FlaxMinLengthLogitsProcessor,
            FlaxSuppressTokensAtBeginLogitsProcessor,
            FlaxSuppressTokensLogitsProcessor,
            FlaxTemperatureLogitsWarper,
            FlaxTopKLogitsWarper,
            FlaxTopPLogitsWarper,
            FlaxWhisperTimeStampLogitsProcessor,
        )
        from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
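Note: the file above only registers export names in `_import_structure`; `_LazyModule` resolves the actual submodules on first attribute access, so importing `transformers.generation` stays cheap until a name is used. A minimal usage sketch (not part of the commit; it assumes `torch` is installed so the torch-gated exports resolve):

from transformers.generation import GenerationConfig, MaxLengthCriteria, StoppingCriteriaList

# Both imports are resolved lazily by _LazyModule at this point.
config = GenerationConfig(max_new_tokens=32, do_sample=False)
stopping = StoppingCriteriaList([MaxLengthCriteria(max_length=64)])
print(config.max_new_tokens, len(stopping))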
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.78 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc ADDED
Binary file (28.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc ADDED
Binary file (12.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc ADDED
Binary file (44.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc ADDED
Binary file (18 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc ADDED
Binary file (27.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc ADDED
Binary file (97.6 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc ADDED
Binary file (7.56 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc ADDED
Binary file (7.78 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc ADDED
Binary file (26.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc ADDED
Binary file (104 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc ADDED
Binary file (153 kB). View file
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_constraints.py ADDED
@@ -0,0 +1,521 @@
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation.
    It must define how the constraint can be satisfied.

    All classes that inherit Constraint must follow the requirement that

    ```py
    completed = False
    while not completed:
        _, completed = constraint.update(constraint.advance())
    ```

    will always terminate (halt).
    """

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """
        Tests whether this constraint has been properly defined.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """
        When called, returns the token that would take this constraint one step closer to being fulfilled.

        Return:
            token_ids(`torch.tensor`): Must be a tensor of a list of indexable tokens, not some integer.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """
        Reads in a token and returns whether it creates progress.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """
        Reads in a token and returns booleans that indicate the progress made by it. This function will update the
        state of this object, unlike `does_advance(self, token_id: int)`.

        This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
        been generated. This becomes important if token_id != desired token (refer to else statement in
        PhrasalConstraint)

        Args:
            token_id(`int`):
                The id of a newly generated token in the beam search.
        Return:
            stepped(`bool`):
                Whether this constraint has become one step closer to being fulfilled.
            completed(`bool`):
                Whether this constraint has been completely fulfilled by this token being generated.
            reset (`bool`):
                Whether this constraint has reset its progress by this token being generated.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """
        Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
        a constraint is interrupted by an unwanted token.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """
        Returns the number of remaining steps of `advance()` in order to complete this constraint.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """
        Creates a new instance of this constraint.

        Args:
            stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state.

        Return:
            constraint(`Constraint`): The same constraint as the one being called from.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    r"""
    [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.

    Args:
        token_ids (`List[int]`):
            The ids of the tokens that must be generated by the output.
    """

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        r"""
        A helper class that builds a trie with the words represented in `nested_token_ids`.
        """
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}

                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """
        The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
        """
        start = self.trie

        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())

        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)

        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """
        Returns whether # of leaves == # of words. Otherwise some word is a subset of another.
        """
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    r"""
    A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.

    Args:
        nested_token_ids (`List[List[int]]`):
            A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from
            the list of words.
    """

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    r"""
    A class for beam scorers to track their progress through a list of constraints.

    Args:
        constraints (`List[Constraint]`):
            A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
    """

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """The list of tokens to generate such that we can make progress.
        By "list" we don't mean the list of tokens that will fully fulfill a constraint.

        Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
        specific constraint `c_i`, we return:

        `[t_k1 for k in indices of unfulfilled constraints]`

        If we are in the middle of a constraint, then we return:
        `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.

        Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint,
        that's the only one we'll return.
        """
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """
        token_ids: the tokens generated thus far to reset the state of the progress through constraints.
        """
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state

            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".

                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.

                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.

                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?

            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".

                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.

                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never touch the self.constraints objects
        # throughout this process. So it's at initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
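As a quick illustration of the `Constraint` contract documented above (feed `advance()` into `update()` until `completed` is returned), here is a small usage sketch; it is not part of the commit and the token ids are arbitrary placeholders:

from transformers.generation.beam_constraints import DisjunctiveConstraint, PhrasalConstraint

# Force the exact token sequence [5, 9, 2] to appear in the generated output.
phrase = PhrasalConstraint([5, 9, 2])
completed = False
while not completed:
    token = phrase.advance()               # next token id that makes progress
    _, completed, _ = phrase.update(token)
print(phrase.remaining())                  # 0 once the whole phrase is matched

# Satisfied by generating any one of the nested token sequences.
either = DisjunctiveConstraint([[3, 4], [3, 7, 8]])
print(either.advance())                    # [3]: the token that starts either branch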
env-llmeval/lib/python3.10/site-packages/transformers/generation/beam_search.py ADDED
@@ -0,0 +1,1005 @@
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from collections import UserDict
from typing import Dict, List, Optional, Tuple, Union

import numpy as np
import torch

from ..utils import add_start_docstrings
from .beam_constraints import Constraint, ConstraintListState


PROCESS_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
            Current scores of the top `2 * num_beams` non-finished beam hypotheses.
        next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
            `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
        next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
            Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        eos_token_id (`Union[int, List[int]]`, *optional*):
            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
        beam_indices (`torch.LongTensor`, *optional*):
            Beam indices indicating to which beam hypothesis each token corresponds.
        group_index (`int`, *optional*):
            The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`].

    Return:
        `UserDict`: A dictionary composed of the fields as defined above:

        - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
          non-finished beams.
        - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
          to the non-finished beam_hypotheses.
        - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
          indicating to which beam the next tokens shall be added.

"""

FINALIZE_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
            [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
            The final scores of all non-finished beams.
        final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
            The last tokens to be added to the non-finished beam_hypotheses.
        final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
            The beam indices indicating to which beam the `final_beam_tokens` shall be added.
        pad_token_id (`int`, *optional*):
            The id of the *padding* token.
        eos_token_id (`Union[int, List[int]]`, *optional*):
            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.

    Return:
        `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences.
        The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
        due to the `eos_token_id`.

"""


class BeamScorer(ABC):
    """
    Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and
    [`~PreTrainedModel.beam_sample`].
    """

    @abstractmethod
    @add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
    def process(
        self,
        input_ids: torch.LongTensor,
        next_scores: torch.FloatTensor,
        next_tokens: torch.LongTensor,
        next_indices: torch.LongTensor,
        **kwargs,
    ) -> Tuple[torch.Tensor]:
        raise NotImplementedError("This is an abstract method.")

    @abstractmethod
    @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING)
    def finalize(
        self,
        input_ids: torch.LongTensor,
        next_scores: torch.FloatTensor,
        next_tokens: torch.LongTensor,
        next_indices: torch.LongTensor,
        max_length: int,
        **kwargs,
    ) -> torch.LongTensor:
        raise NotImplementedError("This is an abstract method.")


class BeamSearchScorer(BeamScorer):
    r"""
    [`BeamScorer`] implementing standard beam search decoding.

    Adapted in part from [Facebook's XLM beam search
    code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529).

    Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS
    implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua)

    Args:
        batch_size (`int`):
            Batch size of `input_ids` for which standard beam search decoding is run in parallel.
        num_beams (`int`):
            Number of beams for beam search.
        device (`torch.device`):
            Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
            allocated.
        length_penalty (`float`, *optional*, defaults to 1.0):
            Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
            the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
            likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
            `length_penalty` < 0.0 encourages shorter sequences.
        do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
            Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
            `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
            heuristic is applied and the generation stops when it is very unlikely to find better candidates;
            `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
            beam search algorithm).
        num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
            The number of beam hypotheses that shall be returned upon calling
            [`~transformers.BeamSearchScorer.finalize`].
        num_beam_groups (`int`, *optional*, defaults to 1):
            Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
            See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
        max_length (`int`, *optional*):
            The maximum length of the sequence to be generated.
    """

    def __init__(
        self,
        batch_size: int,
        num_beams: int,
        device: torch.device,
        length_penalty: Optional[float] = 1.0,
        do_early_stopping: Optional[Union[bool, str]] = False,
        num_beam_hyps_to_keep: Optional[int] = 1,
        num_beam_groups: Optional[int] = 1,
        max_length: Optional[int] = None,
    ):
        self.num_beams = num_beams
        self.device = device
        self.length_penalty = length_penalty
        self.do_early_stopping = do_early_stopping
        self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
        self.num_beam_groups = num_beam_groups
        self.group_size = self.num_beams // self.num_beam_groups

        self._is_init = False
        # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch.
        # If group_beam_search is not used, the list consists of `batch_size` beam_hyps.
        self._beam_hyps = [
            BeamHypotheses(
                num_beams=self.group_size,
                length_penalty=self.length_penalty,
                early_stopping=self.do_early_stopping,
                max_length=max_length,
            )
            for _ in range(batch_size * self.num_beam_groups)
        ]
        # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group
        # in the i-th mini-batch is complete.
        self._done = torch.tensor(
            [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device
        )

        if not isinstance(num_beams, int) or num_beams <= 1:
            raise ValueError(
                f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
                " one should make use of `greedy_search` instead."
            )

        if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
            raise ValueError(
                "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
                f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
            )

    @property
    def is_done(self) -> bool:
        return self._done.all()

    def process(
        self,
        input_ids: torch.LongTensor,
        next_scores: torch.FloatTensor,
        next_tokens: torch.LongTensor,
        next_indices: torch.LongTensor,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[Union[int, List[int]]] = None,
        beam_indices: Optional[torch.LongTensor] = None,
        group_index: Optional[int] = 0,
        decoder_prompt_len: Optional[int] = 0,
    ) -> Dict[str, torch.Tensor]:
        # add up to the length which the next_scores is calculated on (including decoder prompt)
        cur_len = input_ids.shape[-1] + 1
        batch_size = len(self._beam_hyps) // self.num_beam_groups

        if not (batch_size == (input_ids.shape[0] // self.group_size)):
            if self.num_beam_groups > 1:
                raise ValueError(
                    f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
                    f"size of {self.group_size} is expected by the beam scorer."
                )
            else:
                raise ValueError(
                    f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
                    f"{self.group_size} is expected by the beam scorer."
                )

        device = input_ids.device
        next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
        next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
        next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)

        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]

        for batch_idx in range(batch_size):
            batch_group_idx = batch_idx * self.num_beam_groups + group_index
            if self._done[batch_group_idx]:
                if self.num_beams < len(self._beam_hyps[batch_group_idx]):
                    raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
                if eos_token_id is None or pad_token_id is None:
                    raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
                # pad the batch
                next_beam_scores[batch_idx, :] = 0
                next_beam_tokens[batch_idx, :] = pad_token_id
                next_beam_indices[batch_idx, :] = 0
                continue

            # next tokens for this sentence
            beam_idx = 0
            for beam_token_rank, (next_token, next_score, next_index) in enumerate(
                zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
            ):
                batch_beam_idx = batch_idx * self.group_size + next_index
                # add to generated hypotheses if end of sentence
                if (eos_token_id is not None) and (next_token.item() in eos_token_id):
                    # if beam_token does not belong to top num_beams tokens, it should not be added
                    is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
                    if is_beam_token_worse_than_top_num_beams:
                        continue
                    if beam_indices is not None:
                        beam_index = beam_indices[batch_beam_idx]
                        beam_index = beam_index + (batch_beam_idx,)
                    else:
                        beam_index = None

                    self._beam_hyps[batch_group_idx].add(
                        input_ids[batch_beam_idx].clone(),
                        next_score.item(),
                        beam_indices=beam_index,
                        generated_len=cur_len - decoder_prompt_len,
                    )
                else:
                    # add next predicted token since it is not eos_token
                    next_beam_scores[batch_idx, beam_idx] = next_score
                    next_beam_tokens[batch_idx, beam_idx] = next_token
                    next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
                    beam_idx += 1

                # once the beam for next step is full, don't add more tokens to it.
                if beam_idx == self.group_size:
                    break

            if beam_idx < self.group_size:
                raise ValueError(
                    f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
                    f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
                )

            # Check if we are done so that we can save a pad step if all(done)
            self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done(
                next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
            )

        return UserDict(
            {
                "next_beam_scores": next_beam_scores.view(-1),
                "next_beam_tokens": next_beam_tokens.view(-1),
                "next_beam_indices": next_beam_indices.view(-1),
            }
        )

    def finalize(
        self,
        input_ids: torch.LongTensor,
        final_beam_scores: torch.FloatTensor,
        final_beam_tokens: torch.LongTensor,
        final_beam_indices: torch.LongTensor,
        max_length: int,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[Union[int, List[int]]] = None,
        beam_indices: Optional[torch.LongTensor] = None,
        decoder_prompt_len: Optional[int] = 0,
    ) -> Tuple[torch.LongTensor]:
        batch_size = len(self._beam_hyps) // self.num_beam_groups

        if isinstance(eos_token_id, int):
            eos_token_id = [eos_token_id]

        # finalize all open beam hypotheses and add to generated hypotheses
        for batch_group_idx, beam_hyp in enumerate(self._beam_hyps):
            if self._done[batch_group_idx]:
                continue

            # all open beam hypotheses are added to the beam hypothesis
            # beam hypothesis class automatically keeps the best beams
            for index_per_group in range(self.group_size):
                batch_beam_idx = batch_group_idx * self.group_size + index_per_group
                final_score = final_beam_scores[batch_beam_idx].item()
                final_tokens = input_ids[batch_beam_idx]
                beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
                generated_len = final_tokens.shape[-1] - decoder_prompt_len
                beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)

        # select the best hypotheses
        sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
        best = []
        best_indices = []
        best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)

        # retrieve best hypotheses
        for i in range(batch_size):
            beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups]
            candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams]
            sorted_hyps = sorted(candidate_beams, key=lambda x: x[0])
            for j in range(self.num_beam_hyps_to_keep):
                best_hyp_tuple = sorted_hyps.pop()
                best_score = best_hyp_tuple[0]
                best_hyp = best_hyp_tuple[1]
                best_index = best_hyp_tuple[2]
                sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
367
+
368
+ # append hyp to lists
369
+ best.append(best_hyp)
370
+
371
+ # append indices to list
372
+ best_indices.append(best_index)
373
+
374
+ best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
375
+
376
+ # prepare for adding eos
377
+ sent_lengths_max = sent_lengths.max().item() + 1
378
+ sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
379
+ decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
380
+
381
+ if len(best_indices) > 0 and best_indices[0] is not None:
382
+ indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
383
+ else:
384
+ indices = None
385
+
386
+ # shorter batches are padded if needed
387
+ if sent_lengths.min().item() != sent_lengths.max().item():
388
+ if pad_token_id is None:
389
+ raise ValueError("`pad_token_id` has to be defined")
390
+ decoded.fill_(pad_token_id)
391
+
392
+ if indices is not None:
393
+ indices.fill_(-1)
394
+
395
+ # fill with hypotheses and eos_token_id if the latter fits in
396
+ for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
397
+ decoded[i, : sent_lengths[i]] = hypo
398
+
399
+ if indices is not None:
400
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
401
+
402
+ if sent_lengths[i] < sent_max_len:
403
+ # inserting only the first eos_token_id
404
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
405
+
406
+ return UserDict(
407
+ {
408
+ "sequences": decoded,
409
+ "sequence_scores": best_scores,
410
+ "beam_indices": indices,
411
+ }
412
+ )
413
+
414
+
415
+ class ConstrainedBeamSearchScorer(BeamScorer):
416
+ r"""
417
+ [`BeamScorer`] implementing constrained beam search decoding.
418
+
419
+
420
+ Args:
421
+ batch_size (`int`):
422
+ Batch size of `input_ids` for which standard beam search decoding is run in parallel.
423
+ num_beams (`int`):
424
+ Number of beams for beam search.
425
+ constraints (`List[Constraint]`):
426
+ A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation
427
+ output. For more information, the documentation of [`Constraint`] should be read.
428
+ device (`torch.device`):
429
+ Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
430
+ allocated.
431
+ length_penalty (`float`, *optional*, defaults to 1.0):
432
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
433
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
434
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
435
+ `length_penalty` < 0.0 encourages shorter sequences.
436
+ do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
437
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
438
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an
439
+ heuristic is applied and the generation stops when is it very unlikely to find better candidates;
440
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
441
+ beam search algorithm).
442
+ num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
443
+ The number of beam hypotheses that shall be returned upon calling
444
+ [`~transformers.BeamSearchScorer.finalize`].
445
+ num_beam_groups (`int`, *optional*, defaults to 1):
446
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
447
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
448
+ max_length (`int`, *optional*):
449
+ The maximum length of the sequence to be generated.
450
+ """
451
+
452
+ def __init__(
453
+ self,
454
+ batch_size: int,
455
+ num_beams: int,
456
+ constraints: List[Constraint],
457
+ device: torch.device,
458
+ length_penalty: Optional[float] = 1.0,
459
+ do_early_stopping: Optional[Union[bool, str]] = False,
460
+ num_beam_hyps_to_keep: Optional[int] = 1,
461
+ num_beam_groups: Optional[int] = 1,
462
+ max_length: Optional[int] = None,
463
+ ):
464
+ self.num_beams = num_beams
465
+ self.device = device
466
+ self.length_penalty = length_penalty
467
+ self.do_early_stopping = do_early_stopping
468
+ self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
469
+ self.num_beam_groups = num_beam_groups
470
+ self.group_size = self.num_beams // self.num_beam_groups
471
+ self.constraints = constraints
472
+
473
+ self._is_init = False
474
+ self._beam_hyps = [
475
+ BeamHypotheses(
476
+ num_beams=self.num_beams,
477
+ length_penalty=self.length_penalty,
478
+ early_stopping=self.do_early_stopping,
479
+ max_length=max_length,
480
+ )
481
+ for _ in range(batch_size)
482
+ ]
483
+ self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
484
+
485
+ if not isinstance(num_beams, int) or num_beams <= 1:
486
+ raise ValueError(
487
+ f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
488
+ " one should make use of `greedy_search` instead."
489
+ )
490
+
491
+ if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
492
+ raise ValueError(
493
+ "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
494
+ f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
495
+ )
496
+
497
+ @property
498
+ def is_done(self) -> bool:
499
+ return self._done.all()
500
+
501
+ def make_constraint_states(self, n):
502
+ return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)]
503
+
504
+ def check_completes_constraints(self, sequence):
505
+ new_state = self.make_constraint_states(1)[0]
506
+ new_state.reset(sequence)
507
+ return new_state.completed
508
+
509
+ def process(
510
+ self,
511
+ input_ids: torch.LongTensor,
512
+ next_scores: torch.FloatTensor,
513
+ next_tokens: torch.LongTensor,
514
+ next_indices: torch.LongTensor,
515
+ scores_for_all_vocab: torch.FloatTensor,
516
+ pad_token_id: Optional[int] = None,
517
+ eos_token_id: Optional[Union[int, List[int]]] = None,
518
+ beam_indices: Optional[torch.LongTensor] = None,
519
+ decoder_prompt_len: Optional[int] = 0,
520
+ ) -> Tuple[torch.Tensor]:
521
+ r"""
522
+ Args:
523
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
524
+ Indices of input sequence tokens in the vocabulary.
525
+
526
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
527
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
528
+
529
+ [What are input IDs?](../glossary#input-ids)
530
+ next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
531
+ Current scores of the top `2 * num_beams` non-finished beam hypotheses.
532
+ next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
533
+ `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
534
+ next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
535
+ Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
536
+ scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`):
537
+ The scores of all tokens in the vocabulary for each of the beam hypotheses.
538
+ pad_token_id (`int`, *optional*):
539
+ The id of the *padding* token.
540
+ eos_token_id (`Union[int, List[int]]`, *optional*):
541
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
542
+ beam_indices (`torch.LongTensor`, *optional*):
543
+ Beam indices indicating to which beam hypothesis each token correspond.
544
+ decoder_prompt_len (`int`, *optional*):
545
+ The length of prompt that is included in the input to decoder.
546
+ Return:
547
+ `UserDict`: A dictionary composed of the fields as defined above:
548
+
549
+ - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
+ non-finished beams.
+ - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
+ to the non-finished beam_hypotheses.
556
+ - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
557
+ indicating to which beam the next tokens shall be added.
558
+ """
559
+
560
+ # add up to the length on which next_scores is calculated (including the decoder prompt)
561
+ cur_len = input_ids.shape[-1] + 1
562
+ batch_size = len(self._beam_hyps)
563
+ if not (batch_size == (input_ids.shape[0] // self.group_size)):
564
+ if self.num_beam_groups > 1:
565
+ raise ValueError(
566
+ f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
567
+ f"size of {self.group_size} is expected by the beam scorer."
568
+ )
569
+ else:
570
+ raise ValueError(
571
+ f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
572
+ f"{self.group_size} is expected by the beam scorer."
573
+ )
574
+
575
+ device = input_ids.device
576
+
577
+ next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
578
+ next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
579
+ next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
580
+
581
+ if isinstance(eos_token_id, int):
582
+ eos_token_id = [eos_token_id]
583
+
584
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
585
+ if self._done[batch_idx]:
586
+ if self.num_beams < len(beam_hyp):
587
+ raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
588
+ if eos_token_id is None or pad_token_id is None:
589
+ raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
590
+ # pad the batch
591
+ next_beam_scores[batch_idx, :] = 0
592
+ next_beam_tokens[batch_idx, :] = pad_token_id
593
+ next_beam_indices[batch_idx, :] = 0
594
+ continue
595
+
596
+ # next tokens for this sentence.
597
+ beam_idx = 0
598
+ for beam_token_rank, (next_token, next_score, next_index) in enumerate(
599
+ zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
600
+ ):
601
+ batch_beam_idx = batch_idx * self.group_size + next_index
602
+ # add to generated hypotheses if end of sentence
603
+ if (eos_token_id is not None) and (next_token.item() in eos_token_id):
604
+ # if beam_token does not belong to top num_beams tokens, it should not be added
605
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
606
+ if is_beam_token_worse_than_top_num_beams:
607
+ continue
608
+
609
+ completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist())
610
+ if completes_constraint:
611
+ if beam_indices is not None:
612
+ beam_index = beam_indices[batch_beam_idx]
613
+ beam_index = beam_index + (batch_beam_idx,)
614
+ else:
615
+ beam_index = None
616
+
617
+ beam_hyp.add(
618
+ input_ids[batch_beam_idx].clone(),
619
+ next_score.item(),
620
+ beam_indices=beam_index,
621
+ generated_len=cur_len - decoder_prompt_len,
622
+ )
623
+ else:
624
+ # add next predicted token since it is not eos_token
625
+ next_beam_scores[batch_idx, beam_idx] = next_score
626
+ next_beam_tokens[batch_idx, beam_idx] = next_token
627
+ next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
628
+ beam_idx += 1
629
+
630
+ # once the beam for next step is full, don't add more tokens to it.
631
+ if beam_idx == self.group_size:
632
+ break
633
+
634
+ new_scores, new_tokens, new_indices = self.step_sentence_constraint(
635
+ batch_idx,
636
+ input_ids,
637
+ scores_for_all_vocab,
638
+ next_beam_scores[batch_idx],
639
+ next_beam_tokens[batch_idx],
640
+ next_beam_indices[batch_idx],
641
+ )
642
+
643
+ next_beam_scores[batch_idx] = new_scores
644
+ next_beam_tokens[batch_idx] = new_tokens
645
+ next_beam_indices[batch_idx] = new_indices
646
+
647
+ if beam_idx < self.group_size:
648
+ raise ValueError(
649
+ f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
650
+ f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
651
+ )
652
+
653
+ # Check if we are done so that we can save a pad step if all(done)
654
+ self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
655
+ next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
656
+ )
657
+
658
+ return UserDict(
659
+ {
660
+ "next_beam_scores": next_beam_scores.view(-1),
661
+ "next_beam_tokens": next_beam_tokens.view(-1),
662
+ "next_beam_indices": next_beam_indices.view(-1),
663
+ }
664
+ )
665
+
666
+ def step_sentence_constraint(
667
+ self,
668
+ batch_idx: int,
669
+ input_ids: torch.LongTensor,
670
+ vocab_scores: torch.FloatTensor,
671
+ sent_beam_scores: torch.FloatTensor,
672
+ sent_beam_tokens: torch.LongTensor,
673
+ sent_beam_indices: torch.LongTensor,
674
+ push_progress: bool = False,
675
+ ):
676
+ # sent_beam_tokens are the next {num_beams} tokens under consideration for this beam
677
+ # (candidate next tokens)
678
+
679
+ # 1. Adding "advance_tokens"
680
+ # using ConstraintListState.advance(), we propose new tokens to be added into this "candidate list" that will
681
+ # advance us in fulfilling the constraints.
682
+
683
+ # 2. Selecting the best candidates such that we end up with the most probable candidates
684
+ # that fulfill our constraints.
685
+
686
+ orig_len = sent_beam_indices.size(0)
687
+ device = sent_beam_indices.device
688
+
689
+ # initialize states
690
+ topk_constraint_states = self.make_constraint_states(orig_len)
691
+ advance_constraint_states = self.make_constraint_states(orig_len)
692
+
693
+ sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len
694
+ this_batch_input_ids = input_ids[sidx:eidx]
695
+ this_batch_token_scores = vocab_scores[sidx:eidx]
696
+ full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1)
697
+
698
+ # need to make new hypotheses that advance the constraints
699
+ track_new = {
700
+ "new_seqs": full_hypotheses.tolist(),
701
+ "new_states": [],
702
+ "new_indices": [],
703
+ "new_tokens": [],
704
+ "new_scores": [],
705
+ }
706
+ for seq_idx, pre_seq in enumerate(this_batch_input_ids):
707
+ # pre_seq = ith sequence generated before this step.
708
+
709
+ # input_ids -> (topk) generic beam search best model next tokens
710
+ # -> (advance) constraints forcing the next token
711
+ # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of
712
+ # hypotheses.
713
+
714
+ topk_state = topk_contraint_states[seq_idx]
715
+ topk_state.reset(full_hypotheses[seq_idx].cpu().tolist())
716
+
717
+ advance_state = advance_constraint_states[seq_idx]
718
+ advance_state.reset(pre_seq.cpu().tolist())
719
+
720
+ if not advance_state.completed:
721
+ advance_tokens = torch.LongTensor(advance_state.advance()).to(device)
722
+ for advance_token in advance_tokens:
723
+ # since adding each `advance_token` leads to a different hypothesis, create new state instance.
724
+ new_state = advance_state.copy(stateful=True)
725
+ new_state.add(advance_token.cpu().tolist())
726
+
727
+ advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist()
728
+ if advance_seq not in track_new["new_seqs"]:
729
+ # prevent duplicates, which are basically bound to happen in this process.
730
+ track_new["new_seqs"].append(advance_seq)
731
+ track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches
732
+ track_new["new_tokens"].append(advance_token)
733
+ track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token))
734
+ track_new["new_states"].append(new_state)
735
+ elif push_progress:
736
+ # Basically, `sent_beam_indices` often selects very few of the generated sequences in `input_ids` that
737
+ # actually fulfill our constraints. For example, let constraints == ["loves pies"] and
738
+
739
+ # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and"
740
+
741
+ # Without this step, if `sent_beam_indices` is something like [1,1], then
742
+ # 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and
743
+ # 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is
744
+ # the else part of `if constraints_completed[seq_idx]`)
745
+ # 3. it ends up simply getting removed from consideration.
746
+
747
+ # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways,
748
+ # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam
749
+ # search times, since completed sequences keep getting removed after all this effort for constrained
750
+ # generation.
751
+
752
+ # Here, we basically take `pre_seq_1` and "push" it into the considered list of hypotheses, by simply
753
+ # appending the next likely token in the vocabulary and adding it to the list of hypotheses.
754
+
755
+ new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token
756
+ advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1)
757
+
758
+ advance_state = advance_constraint_states[seq_idx]
759
+
760
+ advance_seq = advance_seq.cpu().tolist()
761
+
762
+ advance_state.reset(advance_seq)
763
+ if advance_seq not in track_new["new_seqs"]:
764
+ # but still don't want to have duplicates
765
+ track_new["new_seqs"].append(advance_seq)
766
+ track_new["new_indices"].append(seq_idx)
767
+ track_new["new_tokens"].append(new_token)
768
+ track_new["new_scores"].append(new_score)
769
+ track_new["new_states"].append(advance_state)
770
+
771
+ if len(track_new["new_indices"]) > 0:
772
+ new_indices = torch.tensor(track_new["new_indices"]).to(device)
773
+ new_tokens = torch.stack(track_new["new_tokens"]).to(device)
774
+ new_scores = torch.stack(track_new["new_scores"]).to(device)
775
+
776
+ all_states = topk_constraint_states + track_new["new_states"]
777
+ all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1)
778
+ all_scores = torch.cat((sent_beam_scores, new_scores), -1)
779
+ all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device)
780
+
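+ # Illustrative example (hypothetical numbers): each `bank` roughly measures how far a candidate has
+ # progressed through the constraints. With all_banks = [2, 1, 2] and all_scores = [-1.2, -0.5, -3.4],
+ # the line below gives zipped = [198.8, 99.5, 196.6], so sorting in descending order groups candidates
+ # by bank first and by score within a bank (the *100 offset assumes |score| stays well below 100).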
781
+ zipped = all_banks * 100 + all_scores
782
+ indices = zipped.sort(descending=True).indices
783
+ sorted_banks = all_banks[indices]
784
+
785
+ # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0}
786
+
787
+ counter = -1
788
+ cur_bank = sorted_banks[0]
789
+ increments = []
790
+ for bank in sorted_banks:
791
+ if bank == cur_bank:
792
+ counter += 1
793
+ else:
794
+ counter = 0
795
+ cur_bank = bank
796
+ increments.append(counter)
797
+ rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
798
+
799
+ indices = indices[rearrangers][:orig_len]
800
+
801
+ sent_beam_scores = all_scores[indices]
802
+ sent_beam_tokens = all_tokens[indices]
803
+ sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices]
804
+
805
+ return sent_beam_scores, sent_beam_tokens, sent_beam_indices
806
+
807
+ def finalize(
808
+ self,
809
+ input_ids: torch.LongTensor,
810
+ final_beam_scores: torch.FloatTensor,
811
+ final_beam_tokens: torch.LongTensor,
812
+ final_beam_indices: torch.LongTensor,
813
+ max_length: int,
814
+ pad_token_id: Optional[int] = None,
815
+ eos_token_id: Optional[Union[int, List[int]]] = None,
816
+ beam_indices: Optional[torch.LongTensor] = None,
817
+ decoder_prompt_len: Optional[int] = 0,
818
+ ) -> Tuple[torch.LongTensor]:
819
+ batch_size = len(self._beam_hyps)
820
+
821
+ if isinstance(eos_token_id, int):
822
+ eos_token_id = [eos_token_id]
823
+
824
+ # finalize all open beam hypotheses and add to generated hypotheses
825
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
826
+ if self._done[batch_idx]:
827
+ continue
828
+
829
+ # all open beam hypotheses are added to the beam hypothesis
830
+ # beam hypothesis class automatically keeps the best beams
831
+
832
+ ids_collect = []
833
+ for beam_id in range(self.num_beams):
834
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
835
+ final_score = final_beam_scores[batch_beam_idx].item()
836
+ final_tokens = input_ids[batch_beam_idx]
837
+
838
+ completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist())
839
+ if completes_constraint:
840
+ beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
841
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
842
+ beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
843
+ ids_collect.append(beam_id)
844
+
845
+ # due to overly complex constraints or other factors, sometimes we can't guarantee a successful
846
+ # generation. In these cases we simply return the highest scoring outputs.
847
+ if len(ids_collect) < self.num_beam_hyps_to_keep:
848
+ for beam_id in range(self.num_beams):
849
+ if beam_id not in ids_collect:
850
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
851
+ final_score = final_beam_scores[batch_beam_idx].item()
852
+ final_tokens = input_ids[batch_beam_idx]
853
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
854
+ beam_hyp.add(final_tokens, final_score, generated_len=generated_len)
855
+ if len(ids_collect) >= self.num_beam_hyps_to_keep:
856
+ break
857
+
858
+ # select the best hypotheses
859
+ sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
860
+ best = []
861
+ best_indices = []
862
+ best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
863
+
864
+ # retrieve best hypotheses
865
+ for i, beam_hyp in enumerate(self._beam_hyps):
866
+ sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
867
+ for j in range(self.num_beam_hyps_to_keep):
868
+ best_hyp_tuple = sorted_hyps.pop()
869
+ best_score = best_hyp_tuple[0]
870
+ best_hyp = best_hyp_tuple[1]
871
+ best_index = best_hyp_tuple[2]
872
+ sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
873
+
874
+ # append to lists
875
+ best.append(best_hyp)
876
+
877
+ # append indices to list
878
+ best_indices.append(best_index)
879
+
880
+ best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
881
+
882
+ # prepare for adding eos
883
+ sent_lengths_max = sent_lengths.max().item() + 1
884
+
885
+ sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
886
+ decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
887
+
888
+ if len(best_indices) > 0 and best_indices[0] is not None:
889
+ indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
890
+ else:
891
+ indices = None
892
+
893
+ # shorter batches are padded if needed
894
+ if sent_lengths.min().item() != sent_lengths.max().item():
895
+ if pad_token_id is None:
896
+ raise ValueError("`pad_token_id` has to be defined")
897
+ decoded.fill_(pad_token_id)
898
+
899
+ if indices is not None:
900
+ indices.fill_(-1)
901
+
902
+ # fill with hypotheses and eos_token_id if the latter fits in
903
+ for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
904
+ decoded[i, : sent_lengths[i]] = hypo
905
+
906
+ if indices is not None:
907
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
908
+
909
+ if sent_lengths[i] < sent_max_len:
910
+ # inserting only the first eos_token_id
911
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
912
+
913
+ return UserDict(
914
+ {
915
+ "sequences": decoded,
916
+ "sequence_scores": best_scores,
917
+ "beam_indices": indices,
918
+ }
919
+ )
920
+
921
+
922
+ class BeamHypotheses:
923
+ def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None):
924
+ """
925
+ Initialize n-best list of hypotheses.
926
+ """
927
+ self.length_penalty = length_penalty
928
+ self.early_stopping = early_stopping
929
+ self.max_length = max_length
930
+ self.num_beams = num_beams
931
+ self.beams = []
932
+ self.worst_score = 1e9
933
+
934
+ if not isinstance(self.early_stopping, bool) and self.max_length is None:
935
+ raise ValueError(
936
+ "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the"
937
+ " BeamScorer class instance at initialization time."
938
+ )
939
+
940
+ def __len__(self):
941
+ """
942
+ Number of hypotheses in the list.
943
+ """
944
+ return len(self.beams)
945
+
946
+ def add(
947
+ self,
948
+ hyp: torch.LongTensor,
949
+ sum_logprobs: float,
950
+ beam_indices: Optional[torch.LongTensor] = None,
951
+ generated_len: Optional[int] = None,
952
+ ):
953
+ """
954
+ Add a new hypothesis to the list.
955
+ """
956
+ if generated_len is not None:
957
+ score = sum_logprobs / (generated_len**self.length_penalty)
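+ # Illustrative example (hypothetical numbers): sum_logprobs = -6.0 over generated_len = 8 scores -0.75
+ # with length_penalty = 1.0 but about -0.094 with length_penalty = 2.0, so larger penalties favor
+ # longer hypotheses.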
958
+ # This 'else' case exists for backward compatibility
959
+ else:
960
+ score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
961
+
962
+ if len(self) < self.num_beams or score > self.worst_score:
963
+ self.beams.append((score, hyp, beam_indices))
964
+ if len(self) > self.num_beams:
965
+ sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
966
+ del self.beams[sorted_next_scores[0][1]]
967
+ self.worst_score = sorted_next_scores[1][0]
968
+ else:
969
+ self.worst_score = min(score, self.worst_score)
970
+
971
+ def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool:
972
+ """
973
+ If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
974
+ one in the heap, then we are done with this sentence.
975
+ """
976
+
977
+ if len(self) < self.num_beams:
978
+ return False
979
+
980
+ # `True`: stop as soon as at least `num_beams` hypotheses are finished
981
+ if self.early_stopping is True:
982
+ return True
983
+ # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate
984
+ # when `length_penalty` is positive. See the discussion below for more details.
985
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
986
+ elif self.early_stopping is False:
987
+ highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
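+ # Illustrative example (hypothetical numbers): best_sum_logprobs = -4.0 over 10 generated tokens with
+ # length_penalty = 1.0 gives -0.4; if the worst kept hypothesis already scores >= -0.4, no running beam
+ # is expected to beat it and we can stop.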
988
+ ret = self.worst_score >= highest_attainable_score
989
+ return ret
990
+ # `"never"`: compute the best possible score, depending on the signal of `length_penalty`
991
+ else:
992
+ # `length_penalty` > 0.0 -> max denominator is obtained from `max_length`, not from `cur_len` -> min
993
+ # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain
994
+ # its max this way
995
+ if self.length_penalty > 0.0:
996
+ if self.max_length <= decoder_prompt_len:
997
+ raise ValueError("max_length is not larger than decoder prompt length")
998
+ highest_attainable_score = (
999
+ best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty
1000
+ )
1001
+ # the opposite logic applies here (max `highest_attainable_score` from `cur_len`)
1002
+ else:
1003
+ highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
1004
+ ret = self.worst_score >= highest_attainable_score
1005
+ return ret
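
For context, the scorers in this file are normally driven for you by `GenerationMixin.generate`. A minimal
sketch, assuming a T5-style checkpoint and a forced word as placeholders, of how the constrained beam search
path above is typically reached from user code:

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("t5-small")  # placeholder checkpoint
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
    inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")

    # `force_words_ids` makes `generate` build constraints internally and route decoding through
    # `ConstrainedBeamSearchScorer`; any beam-based path requires `num_beams` > 1.
    force_words_ids = tokenizer(["wunderbar"], add_special_tokens=False).input_ids
    outputs = model.generate(**inputs, num_beams=4, force_words_ids=force_words_ids, max_new_tokens=32)
    print(tokenizer.batch_decode(outputs, skip_special_tokens=True))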
env-llmeval/lib/python3.10/site-packages/transformers/generation/candidate_generator.py ADDED
@@ -0,0 +1,410 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import copy
17
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
18
+
19
+ import torch
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ..modeling_utils import PreTrainedModel
24
+ from .configuration_utils import GenerationConfig
25
+ from .logits_process import LogitsProcessorList
26
+
27
+
28
+ class CandidateGenerator:
29
+ """Abstract base class for all candidate generators that can be applied during assisted generation."""
30
+
31
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
32
+ """
33
+ Fetches the candidates to be tried for the current input.
34
+
35
+ Args:
36
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
37
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
38
+
39
+ Return:
40
+ `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
41
+ assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length,
42
+ vocabulary_size)` containing the logits associated to each candidate.
43
+ """
44
+ raise NotImplementedError(
45
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`."
46
+ )
47
+
48
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
49
+ """
50
+ Updates the candidate generation strategy based on the outcomes.
51
+
52
+ Args:
53
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
54
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
55
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
56
+ Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
57
+ beam search or log softmax for each vocabulary token when using beam search
58
+ num_matches (`int`):
59
+ The number of matches between the candidate sequences and the model predictions.
60
+ """
61
+ raise NotImplementedError(
62
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can call "
63
+ "`update_candidate_strategy`."
64
+ )
65
+
66
+
67
+ class AssistedCandidateGenerator(CandidateGenerator):
68
+ """
69
+ `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates
70
+ candidates through the use of a smaller model. Read the following blog post for more information:
71
+ https://huggingface.co/blog/assisted-generation
72
+
73
+ Args:
74
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
75
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
76
+ assistant_model (`PreTrainedModel`):
77
+ The model to be used for generating candidates. This model should be smaller than the main model.
78
+ generation_config (`~generation.GenerationConfig`, *optional*):
79
+ The generation configuration to be used as base parametrization for the generation call.
80
+ logits_processor (`LogitsProcessorList`):
81
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
82
+ used to modify the prediction scores of the language modeling head applied at each generation step.
83
+ model_kwargs (`Dict`):
84
+ The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
85
+ model as well.
86
+ inputs_tensor (`torch.Tensor`, *optional*):
87
+ The model input tensor. In encoder-decoder models, this is the encoder input.
88
+ """
89
+
90
+ def __init__(
91
+ self,
92
+ input_ids: torch.LongTensor,
93
+ assistant_model: "PreTrainedModel",
94
+ generation_config: "GenerationConfig",
95
+ logits_processor: "LogitsProcessorList",
96
+ model_kwargs: Dict,
97
+ inputs_tensor: Optional[torch.Tensor] = None,
98
+ ):
99
+ # Make sure all data at the same device as assistant model
100
+ device = assistant_model.device
101
+ input_ids = input_ids.to(device)
102
+ if inputs_tensor is not None:
103
+ inputs_tensor = inputs_tensor.to(device)
104
+
105
+ # Prepare the assistant and the starting number of candidate tokens
106
+ self.assistant_model = assistant_model
107
+ self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens
108
+
109
+ # Prepare the kwargs for the assistant model
110
+ assistant_kwargs = {}
111
+ for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads
112
+ if key not in ("encoder_outputs", "assistant_encoder_outputs"):
113
+ assistant_kwargs[key] = (
114
+ value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value)
115
+ )
116
+
117
+ if "assistant_encoder_outputs" in model_kwargs:
118
+ assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"]
119
+ elif assistant_model.config.is_encoder_decoder:
120
+ inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs(
121
+ inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs
122
+ )
123
+ assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(
124
+ inputs_tensor, assistant_kwargs, model_input_name
125
+ )
126
+ elif "encoder_outputs" in model_kwargs:
127
+ assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"]
128
+ self.assistant_kwargs = assistant_kwargs
129
+
130
+ # Prepare assistant model's keys of inputs
131
+ if assistant_model.config.is_encoder_decoder:
132
+ # both are encoder-decoder
133
+ self.input_ids_key = "decoder_input_ids"
134
+ self.attention_key = "decoder_attention_mask"
135
+ elif "encoder_outputs" in assistant_kwargs:
136
+ # special case for encoder-decoder with decoder-only assistant (like DistilWhisper)
137
+ self.input_ids_key = "input_ids"
138
+ self.attention_key = "attention_mask"
139
+ self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get(
140
+ "decoder_attention_mask",
141
+ torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long),
142
+ )
143
+ else:
144
+ # both are decoder-only
145
+ self.input_ids_key = "input_ids"
146
+ self.attention_key = "attention_mask"
147
+
148
+ # Prepare generation-related options.
149
+ eos_token_id = generation_config.eos_token_id
150
+ if isinstance(eos_token_id, int):
151
+ eos_token_id = [eos_token_id]
152
+ self.eos_token_id_tensor = (
153
+ torch.tensor(eos_token_id).to(input_ids.device) if eos_token_id is not None else None
154
+ )
155
+ self.logits_processor = logits_processor
156
+ self.generation_config = copy.deepcopy(generation_config)
157
+ self.generation_config.return_dict_in_generate = True
158
+ self.generation_config.output_scores = True
159
+
160
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
161
+ """
162
+ Fetches the candidates to be tried for the current input.
163
+
164
+ Args:
165
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
166
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
167
+
168
+ Return:
169
+ `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
170
+ assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
171
+ vocabulary_size)` containing the logits associated to each candidate.
172
+ """
173
+ input_ids = input_ids.to(self.assistant_model.device)
174
+
175
+ # Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
176
+ new_cur_len = input_ids.shape[-1]
177
+ max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1)
178
+ if max_new_tokens == 0:
179
+ return input_ids, None
180
+
181
+ # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length
182
+ # (which implicitly contains the number of accepted candidates from the previous round)
183
+ has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None
184
+ if has_past_key_values:
185
+ new_cache_size = new_cur_len - 1
186
+ self.assistant_kwargs["past_key_values"] = _crop_past_key_values(
187
+ self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1
188
+ ) # the assistant does not have the token after the last match, hence the -1
189
+
190
+ self.assistant_kwargs = _prepare_attention_mask(
191
+ self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder
192
+ )
193
+ self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len)
194
+
195
+ # 2. Forecast next N tokens using the assistant model.
196
+ assistant_generation_kwargs = {
197
+ self.input_ids_key: input_ids,
198
+ "max_new_tokens": max_new_tokens,
199
+ "generation_config": self.generation_config,
200
+ "logits_processor": self.logits_processor,
201
+ }
202
+
203
+ assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs)
204
+
205
+ # 3. Update variables for the next round of candidate generation
206
+ self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values
207
+
208
+ # 4. Prepare variables for output
209
+ candidate_logits = torch.stack(assistant_output.scores, dim=1)
210
+ candidate_ids = assistant_output.sequences
211
+ return candidate_ids, candidate_logits
212
+
213
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
214
+ """
215
+ Updates the candidate generation strategy based on the outcomes.
216
+
217
+ Args:
218
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
219
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
220
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
221
+ Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
222
+ beam search or log softmax for each vocabulary token when using beam search
223
+ num_matches (`int`):
224
+ The number of matches between the candidate sequences and the model predictions.
225
+ """
226
+ # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic,
227
+ # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the
228
+ # cost of forecasting incorrect assistant tokens.
229
+ if self.assistant_model.generation_config.num_assistant_tokens_schedule in {
230
+ "heuristic",
231
+ "heuristic_transient",
232
+ }:
233
+ if num_matches == int(self.num_assistant_tokens):
234
+ self.num_assistant_tokens += 2.0
235
+ else:
236
+ self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0)
237
+
238
+
239
+ class PromptLookupCandidateGenerator(CandidateGenerator):
240
+ """
241
+ `CandidateGenerator` class to be used for prompt lookup generation. This class generates candidates by looking up
242
+ likely continuations in the provided prompt (input_ids) itself.
243
+ Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding
244
+
245
+ Args:
246
+ max_matching_ngram_size (`int`):
247
+ The maximum ngram size to be considered for matching in the prompt
248
+ num_output_tokens (`int`):
249
+ The number of tokens to be output as candidate tokens.
250
+ """
251
+
252
+ def __init__(
253
+ self,
254
+ num_output_tokens: int = 10,
255
+ max_matching_ngram_size: Optional[int] = None,
256
+ ):
257
+ self.num_output_tokens = num_output_tokens
258
+ self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2
259
+
260
+ if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0:
261
+ raise ValueError("Invalid max_matching_ngram_size or num_output_tokens")
262
+
263
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
264
+ """
265
+ Fetches the candidates to be tried for the current input.
266
+
267
+ Args:
268
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
269
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
270
+
271
+ Return:
272
+ `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.
273
+ """
274
+ input_length = input_ids.size(1)
275
+
276
+ chosen_ids = None
277
+ match_found = False
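+ # Illustrative example (hypothetical ids): with input_ids = [[5, 8, 3, 5, 8]] and ngram_size = 2, the
+ # trailing bigram [5, 8] also occurs at position 0, so the tokens that followed it there ([3, 5, 8])
+ # become the proposed continuation.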
278
+ for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):
279
+ # Create sliding windows of size ngram_size
280
+ windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
281
+
282
+ # Convert ngram to a tensor for comparison
283
+ ngram_tensor = input_ids[0, -ngram_size:]
284
+
285
+ # Find where the windows match the ngram
286
+ matches = (windows == ngram_tensor).all(dim=2)
287
+
288
+ # Get the indices of matches
289
+ match_indices = matches.nonzero(as_tuple=True)[1]
290
+
291
+ # Iterate through match indices to find a valid continuation
292
+ for idx in match_indices:
293
+ start_idx = idx + ngram_size
294
+ end_idx = start_idx + self.num_output_tokens
295
+ end_idx = min(end_idx, input_length)
296
+
297
+ if start_idx < end_idx:
298
+ chosen_ids = input_ids[0, start_idx:end_idx]
299
+ match_found = True
300
+ break
301
+ if match_found:
302
+ break
303
+
304
+ if chosen_ids is None or len(chosen_ids) == 0:
305
+ # In case we didn't find a match, return the input sequence unchanged; this reverts back to autoregressive decoding
306
+ return input_ids, None
307
+
308
+ # Now we need to extend input_ids with chosen_ids
309
+ chosen_ids = chosen_ids.unsqueeze(0)
310
+ candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)
311
+ # assisted_generation expects logits as well, but we don't have those here, so returning None
312
+ return candidate_input_ids, None
313
+
314
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
315
+ """
316
+ Updates the candidate generation strategy based on the outcomes.
317
+
318
+ Args:
319
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
320
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
321
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
322
+ Prediction scores of a language modeling head. These can be logits for each vocabulary when not using
323
+ beam search or log softmax for each vocabulary token when using beam search
324
+ num_matches (`int`):
325
+ The number of matches between the candidate sequences and the model predictions.
326
+ """
327
+ # Currently does nothing
328
+ return
329
+
330
+
331
+ def _crop_past_key_values(model, past_key_values, maximum_length):
332
+ """Crops the past key values up to a certain maximum length."""
333
+ new_past = []
334
+ if model.config.is_encoder_decoder:
335
+ for idx in range(len(past_key_values)):
336
+ new_past.append(
337
+ (
338
+ past_key_values[idx][0][:, :, :maximum_length, :],
339
+ past_key_values[idx][1][:, :, :maximum_length, :],
340
+ past_key_values[idx][2],
341
+ past_key_values[idx][3],
342
+ )
343
+ )
344
+ past_key_values = tuple(new_past)
345
+ # bloom is special
346
+ elif "bloom" in model.__class__.__name__.lower() or (
347
+ model.config.architectures is not None and "bloom" in model.config.architectures[0].lower()
348
+ ):
349
+ for idx in range(len(past_key_values)):
350
+ new_past.append(
351
+ (
352
+ past_key_values[idx][0][:, :, :maximum_length],
353
+ past_key_values[idx][1][:, :maximum_length, :],
354
+ )
355
+ )
356
+ past_key_values = tuple(new_past)
357
+ # gptbigcode is too
358
+ elif "gptbigcode" in model.__class__.__name__.lower() or (
359
+ model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower()
360
+ ):
361
+ if model.config.multi_query:
362
+ for idx in range(len(past_key_values)):
363
+ past_key_values[idx] = past_key_values[idx][:, :maximum_length, :]
364
+ else:
365
+ for idx in range(len(past_key_values)):
366
+ past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :]
367
+ else:
368
+ for idx in range(len(past_key_values)):
369
+ new_past.append(
370
+ (
371
+ past_key_values[idx][0][:, :, :maximum_length, :],
372
+ past_key_values[idx][1][:, :, :maximum_length, :],
373
+ )
374
+ )
375
+ past_key_values = tuple(new_past)
376
+ return past_key_values
377
+
378
+
379
+ def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]:
380
+ """Expands or crops the model's mask for decoding purposes, to the defined length"""
381
+
382
+ mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask"
383
+ if mask_key not in model_kwargs:
384
+ return model_kwargs
385
+
386
+ mask = model_kwargs[mask_key]
387
+ mask_length_diff = new_length - mask.shape[1]
388
+
389
+ if mask_length_diff < 0:
390
+ model_kwargs[mask_key] = mask[:, :mask_length_diff]
391
+ elif mask_length_diff > 0:
392
+ model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1)
393
+ return model_kwargs
394
+
395
+
396
+ def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]:
397
+ """Expands or crops the model's token_type_ids for decoding purposes, to the defined length"""
398
+ if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None:
399
+ return model_kwargs
400
+
401
+ token_type_ids = model_kwargs["token_type_ids"]
402
+ final_token_type = token_type_ids[:, -1].unsqueeze(-1)
403
+ type_length_diff = new_length - token_type_ids.shape[1]
404
+
405
+ if type_length_diff < 0:
406
+ token_type_ids = token_type_ids[:, :type_length_diff]
407
+ elif type_length_diff > 0:
408
+ token_type_copies = final_token_type.repeat(1, type_length_diff)
409
+ model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1)
410
+ return model_kwargs
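
For context, the candidate generators in this file are selected internally by `GenerationMixin.generate`.
A minimal sketch, assuming small GPT-2 checkpoints as placeholders, of how each path is typically reached
from user code:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoints sharing one tokenizer
    model = AutoModelForCausalLM.from_pretrained("gpt2-large")
    assistant = AutoModelForCausalLM.from_pretrained("gpt2")  # smaller draft model
    inputs = tokenizer("The quick brown fox", return_tensors="pt")

    # Passing `assistant_model` routes generation through AssistedCandidateGenerator.
    out = model.generate(**inputs, assistant_model=assistant, max_new_tokens=32)

    # Passing `prompt_lookup_num_tokens` instead selects PromptLookupCandidateGenerator (no draft model).
    out = model.generate(**inputs, prompt_lookup_num_tokens=10, max_new_tokens=32)
    print(tokenizer.batch_decode(out, skip_special_tokens=True))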
env-llmeval/lib/python3.10/site-packages/transformers/generation/configuration_utils.py ADDED
@@ -0,0 +1,1092 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Generation configuration class and utilities."""
16
+
17
+ import copy
18
+ import json
19
+ import os
20
+ import warnings
21
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Union
22
+
23
+ from .. import __version__
24
+ from ..configuration_utils import PretrainedConfig
25
+ from ..utils import (
26
+ GENERATION_CONFIG_NAME,
27
+ ExplicitEnum,
28
+ PushToHubMixin,
29
+ cached_file,
30
+ download_url,
31
+ extract_commit_hash,
32
+ is_remote_url,
33
+ logging,
34
+ )
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ from ..modeling_utils import PreTrainedModel
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+ METADATA_FIELDS = ("_from_model_config", "_commit_hash", "_original_object_hash", "transformers_version")
43
+
44
+
45
+ class GenerationMode(ExplicitEnum):
46
+ """
47
+ Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.
48
+ """
49
+
50
+ # Non-beam methods
51
+ CONTRASTIVE_SEARCH = "contrastive_search"
52
+ GREEDY_SEARCH = "greedy_search"
53
+ SAMPLE = "sample"
54
+ ASSISTED_GENERATION = "assisted_generation"
55
+ # Beam methods
56
+ BEAM_SEARCH = "beam_search"
57
+ BEAM_SAMPLE = "beam_sample"
58
+ CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
59
+ GROUP_BEAM_SEARCH = "group_beam_search"
60
+
61
+
62
+ class GenerationConfig(PushToHubMixin):
63
+ # no-format
64
+ r"""
65
+ Class that holds a configuration for a generation task. A `generate` call supports the following generation methods
66
+ for text-decoder, text-to-text, speech-to-text, and vision-to-text models:
67
+
68
+ - *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and
69
+ `do_sample=False`
70
+ - *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0.`
71
+ and `top_k>1`
72
+ - *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and
73
+ `do_sample=True`
74
+ - *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and
75
+ `do_sample=False`
76
+ - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if
77
+ `num_beams>1` and `do_sample=True`
78
+ - *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if
79
+ `num_beams>1` and `num_beam_groups>1`
80
+ - *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if
81
+ `constraints!=None` or `force_words_ids!=None`
82
+ - *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if
83
+ `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`
84
+
85
+ You do not need to call any of the above methods directly. Pass custom parameter values to `.generate()`. To learn
86
+ more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
87
+
88
+ <Tip>
89
+
90
+ A large number of these flags control the logits or the stopping criteria of the generation. Make sure you check
91
+ the [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full
92
+ description of the possible manipulations, as well as examples of their usage.
93
+
94
+ </Tip>
95
+
96
+ Args:
97
+ > Parameters that control the length of the output
98
+
99
+ max_length (`int`, *optional*, defaults to 20):
100
+ The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
101
+ `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.
102
+ max_new_tokens (`int`, *optional*):
103
+ The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
104
+ min_length (`int`, *optional*, defaults to 0):
105
+ The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +
106
+ `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.
107
+ min_new_tokens (`int`, *optional*):
108
+ The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
109
+ early_stopping (`bool` or `str`, *optional*, defaults to `False`):
110
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
111
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
112
+ heuristic is applied and the generation stops when it is very unlikely to find better candidates;
113
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
114
+ beam search algorithm).
115
+ max_time(`float`, *optional*):
116
+ The maximum amount of time you allow the computation to run for in seconds. Generation will still finish
117
+ the current pass after the allocated time has passed.
118
+
119
+ > Parameters that control the generation strategy used
120
+
121
+ do_sample (`bool`, *optional*, defaults to `False`):
122
+ Whether or not to use sampling; use greedy decoding otherwise.
123
+ num_beams (`int`, *optional*, defaults to 1):
124
+ Number of beams for beam search. 1 means no beam search.
125
+ num_beam_groups (`int`, *optional*, defaults to 1):
126
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
127
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
128
+ penalty_alpha (`float`, *optional*):
129
+ The value balances the model confidence and the degeneration penalty in contrastive search decoding.
130
+ use_cache (`bool`, *optional*, defaults to `True`):
131
+ Whether or not the model should use the past key/values attentions (if applicable to the model) to
132
+ speed up decoding.
133
+
134
+ > Parameters for manipulation of the model output logits
135
+
136
+ temperature (`float`, *optional*, defaults to 1.0):
137
+ The value used to modulate the next token probabilities.
138
+ top_k (`int`, *optional*, defaults to 50):
139
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
140
+ top_p (`float`, *optional*, defaults to 1.0):
141
+ If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
142
+ `top_p` or higher are kept for generation.
143
+ typical_p (`float`, *optional*, defaults to 1.0):
144
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
145
+ the expected conditional probability of predicting a random token next, given the partial text already
146
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
147
+ add up to `typical_p` or higher are kept for generation. See [this
148
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
149
+ epsilon_cutoff (`float`, *optional*, defaults to 0.0):
150
+ If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
151
+ `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the
152
+ size of the model. See [Truncation Sampling as Language Model
153
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
154
+ eta_cutoff (`float`, *optional*, defaults to 0.0):
155
+ Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between
156
+ 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *
157
+ exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token
158
+ probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,
159
+ depending on the size of the model. See [Truncation Sampling as Language Model
160
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
161
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
162
+ This value is subtracted from a beam's score if it generates the same token as any beam from another group at a
163
+ particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled.
164
+ repetition_penalty (`float`, *optional*, defaults to 1.0):
165
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
166
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
167
+ encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):
168
+ The parameter for `encoder_repetition_penalty`. An exponential penalty on sequences that are not in the
169
+ original input. 1.0 means no penalty.
170
+ length_penalty (`float`, *optional*, defaults to 1.0):
171
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
172
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
173
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
174
+ `length_penalty` < 0.0 encourages shorter sequences.
175
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0):
176
+ If set to int > 0, all ngrams of that size can only occur once.
177
+ bad_words_ids(`List[List[int]]`, *optional*):
178
+ List of list of token ids that are not allowed to be generated. Check
179
+ [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples.
180
+ force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*):
181
+ List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of
182
+ words that must be included, the opposite to `bad_words_ids`. If given `List[List[List[int]]]`, this
183
+ triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one
184
+ can allow different forms of each word.
185
+ renormalize_logits (`bool`, *optional*, defaults to `False`):
186
+ Whether to renormalize the logits after applying all the logits processors or warpers (including the custom
187
+ ones). It's highly recommended to set this flag to `True` as the search algorithms assume the score logits
188
+ are normalized but some logit processors or warpers break the normalization.
189
+ constraints (`List[Constraint]`, *optional*):
190
+ Custom constraints that can be added to the generation to ensure that the output will contain the use of
191
+ certain tokens as defined by `Constraint` objects, in the most sensible way possible.
192
+ forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
193
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
194
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
195
+ language token.
196
+ forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`):
197
+ The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
198
+ list to set multiple *end-of-sequence* tokens.
199
+ remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
200
+ Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing.
201
+ Note that using `remove_invalid_values` can slow down generation.
202
+ exponential_decay_length_penalty (`tuple(int, float)`, *optional*):
203
+ This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been
204
+ generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where
205
+ the penalty starts and `decay_factor` represents the factor of exponential decay.
206
+ suppress_tokens (`List[int]`, *optional*):
207
+ A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set their
208
+ log probs to `-inf` so that they are not sampled.
209
+ begin_suppress_tokens (`List[int]`, *optional*):
210
+ A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens` logit
211
+ processor will set their log probs to `-inf` so that they are not sampled.
212
+ forced_decoder_ids (`List[List[int]]`, *optional*):
213
+ A list of pairs of integers which indicates a mapping from generation indices to token indices that will be
214
+ forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token
215
+ of index 123.
216
+ sequence_bias (`Dict[Tuple[int], float]`, *optional*):
217
+ Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the
218
+ sequence being selected, while negative biases do the opposite. Check
219
+ [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples.
220
+ guidance_scale (`float`, *optional*):
221
+ The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
222
+ Higher guidance scale encourages the model to generate samples that are more closely linked to the input
223
+ prompt, usually at the expense of poorer quality.
224
+ low_memory (`bool`, *optional*):
225
+ Switch to sequential beam search and sequential topk for contrastive search to reduce peak memory.
226
+ Used with beam search and contrastive search.
227
+
228
+
229
+ > Parameters that define the output variables of `generate`
230
+
231
+ num_return_sequences(`int`, *optional*, defaults to 1):
232
+ The number of independently computed returned sequences for each element in the batch.
233
+ output_attentions (`bool`, *optional*, defaults to `False`):
234
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
235
+ tensors for more details.
236
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
237
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
238
+ more details.
239
+ output_scores (`bool`, *optional*, defaults to `False`):
240
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
241
+ output_logits (`bool`, *optional*):
242
+ Whether or not to return the unprocessed prediction logit scores. See `logits` under returned tensors for
243
+ more details.
244
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
245
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
246
+
247
+ > Special tokens that can be used at generation time
248
+
249
+ pad_token_id (`int`, *optional*):
250
+ The id of the *padding* token.
251
+ bos_token_id (`int`, *optional*):
252
+ The id of the *beginning-of-sequence* token.
253
+ eos_token_id (`Union[int, List[int]]`, *optional*):
254
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
255
+
256
+ > Generation parameters exclusive to encoder-decoder models
257
+
258
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
259
+ If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the
260
+ `decoder_input_ids`.
261
+ decoder_start_token_id (`Union[int, List[int]]`, *optional*):
262
+ If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token or a list of length
263
+ `batch_size`. Passing a list enables different start ids for each element in the batch
264
+ (e.g. multilingual models with different target languages in one batch).
265
+
266
+
267
+ > Generation parameters exclusive to [assistant generation](https://arxiv.org/abs/2211.17192)
268
+
269
+ num_assistant_tokens (`int`, *optional*, defaults to 5):
270
+ Defines the number of _speculative tokens_ that shall be generated by the assistant model before being
271
+ checked by the target model at each iteration. Higher values for `num_assistant_tokens` make the generation
272
+ more _speculative_: if the assistant model is performant, larger speed-ups can be reached; if the assistant
273
+ model requires lots of corrections, lower speed-ups are reached.
274
+
275
+ num_assistant_tokens_schedule (`str`, *optional*, defaults to `"heuristic"`):
276
+ Defines the schedule at which max assistant tokens shall be changed during inference.
277
+ - `"heuristic"`: When all speculative tokens are correct, increase `num_assistant_tokens` by 2, otherwise
278
+ reduce it by 1. The `num_assistant_tokens` value is persistent across multiple generation calls with the same assistant model.
279
+ - `"heuristic_transient"`: Same as `"heuristic"` but `num_assistant_tokens` is reset to its initial value after each generation call.
280
+ - `"constant"`: `num_assistant_tokens` stays unchanged during generation.
281
+
282
+ prompt_lookup_num_tokens (`int`, *optional*, defaults to `None`):
283
+ The number of tokens to be output as candidate tokens.
284
+
285
+ max_matching_ngram_size (`int`, *optional*, defaults to `None`):
286
+ The maximum ngram size to be considered for matching in the prompt. Defaults to 2 if not provided.
287
+
288
+ > Parameters specific to the caching mechanism:
289
+
290
+ cache_implementation (`str`, *optional*, defaults to `None`):
291
+ Cache class that should be used when generating.
292
+
293
+
294
+ > Wild card
295
+
296
+ generation_kwargs:
297
+ Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not
298
+ present in `generate`'s signature will be used in the model forward pass.
299
+ """
300
+
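A minimal usage sketch of the class documented above, assuming a standard `transformers` install and the `openai-community/gpt2` checkpoint already used in this file's examples; exact generated text depends on the installed version and on sampling.

```python
# Sketch: build a sampling-oriented GenerationConfig and hand it to `generate`.
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

generation_config = GenerationConfig(
    do_sample=True,            # multinomial sampling instead of greedy decoding
    temperature=0.7,
    top_p=0.9,
    max_new_tokens=32,
    pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no dedicated pad token
)

inputs = tokenizer("The quick brown fox", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```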
301
+ def __init__(self, **kwargs):
302
+ # Parameters that control the length of the output
303
+ self.max_length = kwargs.pop("max_length", 20)
304
+ self.max_new_tokens = kwargs.pop("max_new_tokens", None)
305
+ self.min_length = kwargs.pop("min_length", 0)
306
+ self.min_new_tokens = kwargs.pop("min_new_tokens", None)
307
+ self.early_stopping = kwargs.pop("early_stopping", False)
308
+ self.max_time = kwargs.pop("max_time", None)
309
+
310
+ # Parameters that control the generation strategy used
311
+ self.do_sample = kwargs.pop("do_sample", False)
312
+ self.num_beams = kwargs.pop("num_beams", 1)
313
+ self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
314
+ self.penalty_alpha = kwargs.pop("penalty_alpha", None)
315
+ self.use_cache = kwargs.pop("use_cache", True)
316
+
317
+ # Parameters for manipulation of the model output logits
318
+ self.temperature = kwargs.pop("temperature", 1.0)
319
+ self.top_k = kwargs.pop("top_k", 50)
320
+ self.top_p = kwargs.pop("top_p", 1.0)
321
+ self.typical_p = kwargs.pop("typical_p", 1.0)
322
+ self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
323
+ self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
324
+ self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
325
+ self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
326
+ self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0)
327
+ self.length_penalty = kwargs.pop("length_penalty", 1.0)
328
+ self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
329
+ self.bad_words_ids = kwargs.pop("bad_words_ids", None)
330
+ self.force_words_ids = kwargs.pop("force_words_ids", None)
331
+ self.renormalize_logits = kwargs.pop("renormalize_logits", False)
332
+ self.constraints = kwargs.pop("constraints", None)
333
+ self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
334
+ self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
335
+ self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
336
+ self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
337
+ self.suppress_tokens = kwargs.pop("suppress_tokens", None)
338
+ self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
339
+ self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None)
340
+ self.sequence_bias = kwargs.pop("sequence_bias", None)
341
+ self.guidance_scale = kwargs.pop("guidance_scale", None)
342
+ self.low_memory = kwargs.pop("low_memory", None)
343
+
344
+ # Parameters that define the output variables of `generate`
345
+ self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
346
+ self.output_attentions = kwargs.pop("output_attentions", False)
347
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
348
+ self.output_scores = kwargs.pop("output_scores", False)
349
+ self.output_logits = kwargs.pop("output_logits", None)
350
+ self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
351
+
352
+ # Special tokens that can be used at generation time
353
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
354
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
355
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
356
+
357
+ # Generation parameters exclusive to encoder-decoder models
358
+ self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
359
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
360
+
361
+ # Assistant generation
362
+ self.num_assistant_tokens = kwargs.pop("num_assistant_tokens", 5)
363
+ self.num_assistant_tokens_schedule = kwargs.pop("num_assistant_tokens_schedule", "heuristic")
364
+
365
+ # Cache implementation
366
+ self.cache_implementation = kwargs.pop("cache_implementation", None)
367
+
368
+ # Prompt lookup decoding
369
+ self.prompt_lookup_num_tokens = kwargs.pop("prompt_lookup_num_tokens", None)
370
+ self.max_matching_ngram_size = kwargs.pop("max_matching_ngram_size", None)
371
+
372
+ # Wild card
373
+ self.generation_kwargs = kwargs.pop("generation_kwargs", {})
374
+
375
+ # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
376
+ # interface.
377
+ self._from_model_config = kwargs.pop("_from_model_config", False)
378
+ self._commit_hash = kwargs.pop("_commit_hash", None)
379
+ self.transformers_version = kwargs.pop("transformers_version", __version__)
380
+
381
+ # Additional attributes without default values
382
+ if not self._from_model_config:
383
+ # we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a
384
+ # model's default configuration file
385
+ for key, value in kwargs.items():
386
+ try:
387
+ setattr(self, key, value)
388
+ except AttributeError as err:
389
+ logger.error(f"Can't set {key} with value {value} for {self}")
390
+ raise err
391
+
392
+ # Validate the values of the attributes
393
+ self.validate(is_init=True)
394
+
395
+ def __hash__(self):
396
+ return hash(self.to_json_string(ignore_metadata=True))
397
+
398
+ def __eq__(self, other):
399
+ if not isinstance(other, GenerationConfig):
400
+ return False
401
+
402
+ self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True)
403
+ other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True)
404
+ return self_without_metadata == other_without_metadata
405
+
406
+ def __repr__(self):
407
+ return f"{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}"
408
+
409
+ def get_generation_mode(self, assistant_model: Optional["PreTrainedModel"] = None) -> GenerationMode:
410
+ """
411
+ Returns the generation mode triggered by the [`GenerationConfig`] instance.
412
+
413
+ Arg:
414
+ assistant_model (`PreTrainedModel`, *optional*):
415
+ The assistant model to be used for assisted generation. If set, the generation mode will be
416
+ assisted generation.
417
+
418
+ Returns:
419
+ `GenerationMode`: The generation mode triggered by the instance.
420
+ """
421
+ # TODO joao: find out a way of not depending on external fields (e.g. `assistant_model`), then make this a
422
+ # property and part of the `__repr__`
423
+ if self.constraints is not None or self.force_words_ids is not None:
424
+ generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH
425
+ elif self.num_beams == 1:
426
+ if self.do_sample is False:
427
+ if (
428
+ self.top_k is not None
429
+ and self.top_k > 1
430
+ and self.penalty_alpha is not None
431
+ and self.penalty_alpha > 0
432
+ ):
433
+ generation_mode = GenerationMode.CONTRASTIVE_SEARCH
434
+ else:
435
+ generation_mode = GenerationMode.GREEDY_SEARCH
436
+ else:
437
+ generation_mode = GenerationMode.SAMPLE
438
+ else:
439
+ if self.num_beam_groups > 1:
440
+ generation_mode = GenerationMode.GROUP_BEAM_SEARCH
441
+ elif self.do_sample is True:
442
+ generation_mode = GenerationMode.BEAM_SAMPLE
443
+ else:
444
+ generation_mode = GenerationMode.BEAM_SEARCH
445
+
446
+ # Assisted generation may extend some generation modes
447
+ if assistant_model is not None or self.prompt_lookup_num_tokens is not None:
448
+ if generation_mode in ("greedy_search", "sample"):
449
+ generation_mode = GenerationMode.ASSISTED_GENERATION
450
+ else:
451
+ raise ValueError(
452
+ "You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate "
453
+ "is only supported with Greedy Search and Sample."
454
+ )
455
+ return generation_mode
456
+
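A short sketch of how the branching above resolves a few common configurations (enum values as defined in `GenerationMode`; outputs shown as comments follow directly from the code above):

```python
from transformers import GenerationConfig

# Defaults (num_beams=1, do_sample=False) resolve to greedy search.
print(GenerationConfig().get_generation_mode())                # GenerationMode.GREEDY_SEARCH

# Sampling with a single beam.
print(GenerationConfig(do_sample=True).get_generation_mode())  # GenerationMode.SAMPLE

# Contrastive search needs penalty_alpha > 0 and top_k > 1.
print(GenerationConfig(penalty_alpha=0.6, top_k=4).get_generation_mode())  # GenerationMode.CONTRASTIVE_SEARCH

# Several beams without sampling.
print(GenerationConfig(num_beams=4).get_generation_mode())     # GenerationMode.BEAM_SEARCH
```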
457
+ def validate(self, is_init=False):
458
+ """
459
+ Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence
460
+ of parameterization that can be detected as incorrect from the configuration instance alone.
461
+
462
+ Note that some parameters not validated here are best validated at generate runtime, as they may depend on
463
+ other inputs and/or the model, such as parameters related to the generation length.
464
+
465
+ Arg:
466
+ is_init (`bool`, *optional*, defaults to `False`):
467
+ Whether the validation is performed during the initialization of the instance.
468
+ """
469
+
470
+ # Validation of individual attributes
471
+ if self.early_stopping not in {True, False, "never"}:
472
+ raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.")
473
+ if self.max_new_tokens is not None and self.max_new_tokens <= 0:
474
+ raise ValueError(f"`max_new_tokens` must be greater than 0, but is {self.max_new_tokens}.")
475
+
476
+ # Validation of attribute relations:
477
+ fix_location = ""
478
+ if is_init:
479
+ fix_location = (
480
+ " This was detected when initializing the generation config instance, which means the corresponding "
481
+ "file may hold incorrect parameterization and should be fixed."
482
+ )
483
+
484
+ # 1. detect sampling-only parameterization when not in sampling mode
485
+ if self.do_sample is False:
486
+ greedy_wrong_parameter_msg = (
487
+ "`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only "
488
+ "used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`."
489
+ + fix_location
490
+ )
491
+ if self.temperature is not None and self.temperature != 1.0:
492
+ warnings.warn(
493
+ greedy_wrong_parameter_msg.format(flag_name="temperature", flag_value=self.temperature),
494
+ UserWarning,
495
+ )
496
+ if self.top_p is not None and self.top_p != 1.0:
497
+ warnings.warn(
498
+ greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
499
+ UserWarning,
500
+ )
501
+ if self.typical_p is not None and self.typical_p != 1.0:
502
+ warnings.warn(
503
+ greedy_wrong_parameter_msg.format(flag_name="typical_p", flag_value=self.typical_p),
504
+ UserWarning,
505
+ )
506
+ if (
507
+ self.top_k is not None and self.top_k != 50 and self.penalty_alpha is None
508
+ ): # contrastive search uses top_k
509
+ warnings.warn(
510
+ greedy_wrong_parameter_msg.format(flag_name="top_k", flag_value=self.top_k),
511
+ UserWarning,
512
+ )
513
+ if self.epsilon_cutoff is not None and self.epsilon_cutoff != 0.0:
514
+ warnings.warn(
515
+ greedy_wrong_parameter_msg.format(flag_name="epsilon_cutoff", flag_value=self.epsilon_cutoff),
516
+ UserWarning,
517
+ )
518
+ if self.eta_cutoff is not None and self.eta_cutoff != 0.0:
519
+ warnings.warn(
520
+ greedy_wrong_parameter_msg.format(flag_name="eta_cutoff", flag_value=self.eta_cutoff),
521
+ UserWarning,
522
+ )
523
+
524
+ # 2. detect beam-only parameterization when not in beam mode
525
+ if self.num_beams is None:
526
+ warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning)
527
+ self.num_beams = 1
528
+
529
+ if self.num_beams == 1:
530
+ single_beam_wrong_parameter_msg = (
531
+ "`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used "
532
+ "in beam-based generation modes. You should set `num_beams>1` or unset `{flag_name}`." + fix_location
533
+ )
534
+ if self.early_stopping is not False:
535
+ warnings.warn(
536
+ single_beam_wrong_parameter_msg.format(flag_name="early_stopping", flag_value=self.early_stopping),
537
+ UserWarning,
538
+ )
539
+ if self.num_beam_groups is not None and self.num_beam_groups != 1:
540
+ warnings.warn(
541
+ single_beam_wrong_parameter_msg.format(
542
+ flag_name="num_beam_groups", flag_value=self.num_beam_groups
543
+ ),
544
+ UserWarning,
545
+ )
546
+ if self.diversity_penalty is not None and self.diversity_penalty != 0.0:
547
+ warnings.warn(
548
+ single_beam_wrong_parameter_msg.format(
549
+ flag_name="diversity_penalty", flag_value=self.diversity_penalty
550
+ ),
551
+ UserWarning,
552
+ )
553
+ if self.length_penalty is not None and self.length_penalty != 1.0:
554
+ warnings.warn(
555
+ single_beam_wrong_parameter_msg.format(flag_name="length_penalty", flag_value=self.length_penalty),
556
+ UserWarning,
557
+ )
558
+ if self.constraints is not None:
559
+ warnings.warn(
560
+ single_beam_wrong_parameter_msg.format(flag_name="constraints", flag_value=self.constraints),
561
+ UserWarning,
562
+ )
563
+
564
+ # 3. detect incorrect parameterization specific to advanced beam modes
565
+ else:
566
+ # constrained beam search
567
+ if self.constraints is not None or self.force_words_ids is not None:
568
+ constrained_wrong_parameter_msg = (
569
+ "one of `constraints`, `force_words_ids` is not `None`, triggering constrained beam search. However, "
570
+ "`{flag_name}` is set to `{flag_value}`, which is incompatible with this generation mode. Set "
571
+ "`constraints` and `force_words_ids` to `None` or unset `{flag_name}` to continue." + fix_location
572
+ )
573
+ if self.do_sample is True:
574
+ raise ValueError(
575
+ constrained_wrong_parameter_msg.format(flag_name="do_sample", flag_value=self.do_sample)
576
+ )
577
+ if self.num_beam_groups is not None and self.num_beam_groups != 1:
578
+ raise ValueError(
579
+ constrained_wrong_parameter_msg.format(
580
+ flag_name="num_beam_groups", flag_value=self.num_beam_groups
581
+ )
582
+ )
583
+ # group beam search
584
+ if self.diversity_penalty != 0.0 or self.num_beam_groups != 1:
585
+ group_error_prefix = (
586
+ "`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. In "
587
+ "this generation mode, "
588
+ )
589
+ if self.do_sample is True:
590
+ raise ValueError(group_error_prefix + "`do_sample` must be set to `False`")
591
+ if self.num_beams % self.num_beam_groups != 0:
592
+ raise ValueError(group_error_prefix + "`num_beams` should be divisible by `num_beam_groups`")
593
+ if self.diversity_penalty == 0.0:
594
+ raise ValueError(
595
+ group_error_prefix
596
+ + "`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical."
597
+ )
598
+
599
+ # 4. check `num_return_sequences`
600
+ if self.num_return_sequences != 1:
601
+ if self.num_beams == 1:
602
+ if self.do_sample is False:
603
+ raise ValueError(
604
+ "Greedy methods without beam search do not support `num_return_sequences` different than 1 "
605
+ f"(got {self.num_return_sequences})."
606
+ )
607
+ elif self.num_return_sequences > self.num_beams:
608
+ raise ValueError(
609
+ f"`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` "
610
+ f"({self.num_beams})."
611
+ )
612
+
613
+ # 5. check common issue: passing `generate` arguments inside the generation config
614
+ generate_arguments = (
615
+ "logits_processor",
616
+ "stopping_criteria",
617
+ "prefix_allowed_tokens_fn",
618
+ "synced_gpus",
619
+ "assistant_model",
620
+ "streamer",
621
+ "negative_prompt_ids",
622
+ "negative_prompt_attention_mask",
623
+ )
624
+ for arg in generate_arguments:
625
+ if hasattr(self, arg):
626
+ raise ValueError(
627
+ f"Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to "
628
+ "`generate()` (or a pipeline) directly."
629
+ )
630
+
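A sketch of the behavior implemented above: sampling-only flags under `do_sample=False` produce warnings, while genuinely incompatible combinations raise exceptions.

```python
import warnings

from transformers import GenerationConfig

# `temperature` only matters when sampling, so this warns (via validate(is_init=True)).
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    GenerationConfig(temperature=0.7)
print(any(issubclass(w.category, UserWarning) for w in caught))  # True

# Constrained beam search is incompatible with sampling, so this raises.
try:
    GenerationConfig(num_beams=2, do_sample=True, force_words_ids=[[42]])
except ValueError as err:
    print(f"rejected: {err}")
```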
631
+ def save_pretrained(
632
+ self,
633
+ save_directory: Union[str, os.PathLike],
634
+ config_file_name: Optional[Union[str, os.PathLike]] = None,
635
+ push_to_hub: bool = False,
636
+ **kwargs,
637
+ ):
638
+ r"""
639
+ Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the
640
+ [`~GenerationConfig.from_pretrained`] class method.
641
+
642
+ Args:
643
+ save_directory (`str` or `os.PathLike`):
644
+ Directory where the configuration JSON file will be saved (will be created if it does not exist).
645
+ config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
646
+ Name of the generation configuration JSON file to be saved in `save_directory`.
647
+ push_to_hub (`bool`, *optional*, defaults to `False`):
648
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
649
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
650
+ namespace).
651
+ kwargs (`Dict[str, Any]`, *optional*):
652
+ Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
653
+ """
654
+
655
+ # At save time, validate the instance -- if any warning/exception is thrown, we refuse to save the instance.
656
+ # This strictness is enforced to prevent bad configurations from being saved and re-used.
657
+ try:
658
+ with warnings.catch_warnings(record=True) as caught_warnings:
659
+ self.validate()
660
+ if len(caught_warnings) > 0:
661
+ raise ValueError(str([w.message for w in caught_warnings]))
662
+ except ValueError as exc:
663
+ raise ValueError(
664
+ "The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. "
665
+ "Fix these issues to save the configuration.\n\nThrown during validation:\n" + str(exc)
666
+ )
667
+
668
+ use_auth_token = kwargs.pop("use_auth_token", None)
669
+
670
+ if use_auth_token is not None:
671
+ warnings.warn(
672
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
673
+ FutureWarning,
674
+ )
675
+ if kwargs.get("token", None) is not None:
676
+ raise ValueError(
677
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
678
+ )
679
+ kwargs["token"] = use_auth_token
680
+
681
+ config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
682
+
683
+ if os.path.isfile(save_directory):
684
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
685
+
686
+ os.makedirs(save_directory, exist_ok=True)
687
+
688
+ if push_to_hub:
689
+ commit_message = kwargs.pop("commit_message", None)
690
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
691
+ repo_id = self._create_repo(repo_id, **kwargs)
692
+ files_timestamps = self._get_files_timestamps(save_directory)
693
+
694
+ output_config_file = os.path.join(save_directory, config_file_name)
695
+
696
+ self.to_json_file(output_config_file, use_diff=True)
697
+ logger.info(f"Configuration saved in {output_config_file}")
698
+
699
+ if push_to_hub:
700
+ self._upload_modified_files(
701
+ save_directory,
702
+ repo_id,
703
+ files_timestamps,
704
+ commit_message=commit_message,
705
+ token=kwargs.get("token"),
706
+ )
707
+
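As a sketch of the strictness enforced above: a config that validates cleanly is written to `generation_config.json`, while one whose `validate()` emits warnings is refused at save time.

```python
import tempfile

from transformers import GenerationConfig

with tempfile.TemporaryDirectory() as tmp_dir:
    # Clean config: saves as `generation_config.json`.
    GenerationConfig(do_sample=True, temperature=0.7).save_pretrained(tmp_dir)

    # Setting a sampling-only flag after __init__ bypasses the init-time check,
    # but save_pretrained() re-validates and refuses to persist it.
    bad_config = GenerationConfig()
    bad_config.temperature = 0.7
    try:
        bad_config.save_pretrained(tmp_dir)
    except ValueError as err:
        print(f"refused to save: {err}")
```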
708
+ @classmethod
709
+ def from_pretrained(
710
+ cls,
711
+ pretrained_model_name: Union[str, os.PathLike],
712
+ config_file_name: Optional[Union[str, os.PathLike]] = None,
713
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
714
+ force_download: bool = False,
715
+ local_files_only: bool = False,
716
+ token: Optional[Union[str, bool]] = None,
717
+ revision: str = "main",
718
+ **kwargs,
719
+ ) -> "GenerationConfig":
720
+ r"""
721
+ Instantiate a [`GenerationConfig`] from a generation configuration file.
722
+
723
+ Args:
724
+ pretrained_model_name (`str` or `os.PathLike`):
725
+ This can be either:
726
+
727
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
728
+ huggingface.co.
729
+ - a path to a *directory* containing a configuration file saved using the
730
+ [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
731
+ config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
732
+ Name of the generation configuration JSON file to be loaded from `pretrained_model_name`.
733
+ cache_dir (`str` or `os.PathLike`, *optional*):
734
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
735
+ standard cache should not be used.
736
+ force_download (`bool`, *optional*, defaults to `False`):
737
+ Whether or not to force to (re-)download the configuration files and override the cached versions if
738
+ they exist.
739
+ resume_download (`bool`, *optional*, defaults to `False`):
740
+ Whether or not to delete an incompletely received file. Attempts to resume the download if such a file
741
+ exists.
742
+ proxies (`Dict[str, str]`, *optional*):
743
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
744
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
745
+ token (`str` or `bool`, *optional*):
746
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
747
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
748
+ revision (`str`, *optional*, defaults to `"main"`):
749
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
750
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
751
+ identifier allowed by git.
752
+
753
+ <Tip>
754
+
755
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
756
+
757
+ </Tip>
758
+
759
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
760
+ If `False`, then this function returns just the final configuration object.
761
+
762
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
763
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
764
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
765
+ subfolder (`str`, *optional*, defaults to `""`):
766
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
767
+ specify the folder name here.
768
+ kwargs (`Dict[str, Any]`, *optional*):
769
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
770
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
771
+ by the `return_unused_kwargs` keyword parameter.
772
+
773
+ Returns:
774
+ [`GenerationConfig`]: The configuration object instantiated from this pretrained model.
775
+
776
+ Examples:
777
+
778
+ ```python
779
+ >>> from transformers import GenerationConfig
780
+
781
+ >>> # Download configuration from huggingface.co and cache.
782
+ >>> generation_config = GenerationConfig.from_pretrained("openai-community/gpt2")
783
+
784
+ >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')*
785
+ >>> generation_config.save_pretrained("./test/saved_model/")
786
+ >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/")
787
+
788
+ >>> # You can also specify configuration names to your generation configuration file
789
+ >>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json")
790
+ >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json")
791
+
792
+ >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation
793
+ >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored
794
+ >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained(
795
+ ... "openai-community/gpt2", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True
796
+ ... )
797
+ >>> generation_config.top_k
798
+ 1
799
+
800
+ >>> unused_kwargs
801
+ {'foo': False}
802
+ ```"""
803
+ config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
804
+
805
+ resume_download = kwargs.pop("resume_download", False)
806
+ proxies = kwargs.pop("proxies", None)
807
+ use_auth_token = kwargs.pop("use_auth_token", None)
808
+ subfolder = kwargs.pop("subfolder", "")
809
+ from_pipeline = kwargs.pop("_from_pipeline", None)
810
+ from_auto_class = kwargs.pop("_from_auto", False)
811
+ commit_hash = kwargs.pop("_commit_hash", None)
812
+
813
+ if use_auth_token is not None:
814
+ warnings.warn(
815
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
816
+ FutureWarning,
817
+ )
818
+ if token is not None:
819
+ raise ValueError(
820
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
821
+ )
822
+ token = use_auth_token
823
+
824
+ user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
825
+ if from_pipeline is not None:
826
+ user_agent["using_pipeline"] = from_pipeline
827
+
828
+ config_path = os.path.join(pretrained_model_name, config_file_name)
829
+ config_path = str(config_path)
830
+
831
+ is_local = os.path.exists(config_path)
832
+ if os.path.isfile(os.path.join(subfolder, config_path)):
833
+ # Special case when config_path is a local file
834
+ resolved_config_file = config_path
835
+ is_local = True
836
+ elif is_remote_url(config_path):
837
+ configuration_file = config_path
838
+ resolved_config_file = download_url(config_path)
839
+ else:
840
+ configuration_file = config_file_name
841
+ try:
842
+ # Load from local folder or from cache or download from model Hub and cache
843
+ resolved_config_file = cached_file(
844
+ pretrained_model_name,
845
+ configuration_file,
846
+ cache_dir=cache_dir,
847
+ force_download=force_download,
848
+ proxies=proxies,
849
+ resume_download=resume_download,
850
+ local_files_only=local_files_only,
851
+ token=token,
852
+ user_agent=user_agent,
853
+ revision=revision,
854
+ subfolder=subfolder,
855
+ _commit_hash=commit_hash,
856
+ )
857
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
858
+ except EnvironmentError:
859
+ # Raise any environment error raised by `cached_file`. It will have a helpful error message adapted to
860
+ # the original exception.
861
+ raise
862
+ except Exception:
863
+ # For any other exception, we throw a generic error.
864
+ raise EnvironmentError(
865
+ f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it"
866
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
867
+ f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory"
868
+ f" containing a {configuration_file} file"
869
+ )
870
+
871
+ try:
872
+ # Load config dict
873
+ config_dict = cls._dict_from_json_file(resolved_config_file)
874
+ config_dict["_commit_hash"] = commit_hash
875
+ except (json.JSONDecodeError, UnicodeDecodeError):
876
+ raise EnvironmentError(
877
+ f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
878
+ )
879
+
880
+ if is_local:
881
+ logger.info(f"loading configuration file {resolved_config_file}")
882
+ else:
883
+ logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
884
+
885
+ if kwargs.get("return_unused_kwargs") is True:
886
+ config, unused_kwargs = cls.from_dict(config_dict, **kwargs)
887
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
888
+ return config, unused_kwargs
889
+ else:
890
+ config = cls.from_dict(config_dict, **kwargs)
891
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
892
+ return config
893
+
894
+ @classmethod
895
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
896
+ with open(json_file, "r", encoding="utf-8") as reader:
897
+ text = reader.read()
898
+ return json.loads(text)
899
+
900
+ @classmethod
901
+ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig":
902
+ """
903
+ Instantiates a [`GenerationConfig`] from a Python dictionary of parameters.
904
+
905
+ Args:
906
+ config_dict (`Dict[str, Any]`):
907
+ Dictionary that will be used to instantiate the configuration object.
908
+ kwargs (`Dict[str, Any]`):
909
+ Additional parameters from which to initialize the configuration object.
910
+
911
+ Returns:
912
+ [`GenerationConfig`]: The configuration object instantiated from those parameters.
913
+ """
914
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
915
+ # Those arguments may be passed along for our internal telemetry.
916
+ # We remove them so they don't appear in `return_unused_kwargs`.
917
+ kwargs.pop("_from_auto", None)
918
+ kwargs.pop("_from_pipeline", None)
919
+ # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
920
+ if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
921
+ kwargs["_commit_hash"] = config_dict["_commit_hash"]
922
+
923
+ # The line below allows model-specific config to be loaded as well through kwargs, with safety checks.
924
+ # See https://github.com/huggingface/transformers/pull/21269
925
+ config = cls(**{**config_dict, **kwargs})
926
+ unused_kwargs = config.update(**kwargs)
927
+
928
+ logger.info(f"Generate config {config}")
929
+ if return_unused_kwargs:
930
+ return config, unused_kwargs
931
+ else:
932
+ return config
933
+
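A small sketch of the merge performed above: keyword overrides are applied on top of the dictionary, and known keys are consumed by `update` rather than reported as unused.

```python
from transformers import GenerationConfig

base = {"do_sample": True, "temperature": 0.7}
config, unused = GenerationConfig.from_dict(base, top_k=20, return_unused_kwargs=True)

print(config.do_sample, config.temperature, config.top_k)  # True 0.7 20
print(unused)  # {} -- `top_k` is a known attribute, so nothing is left over
```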
934
+ def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
935
+ """
936
+ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
937
+ converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the
938
+ *"float32"* string, which can then be stored in the JSON format.
939
+ """
940
+ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
941
+ d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
942
+ for value in d.values():
943
+ if isinstance(value, dict):
944
+ self.dict_torch_dtype_to_str(value)
945
+
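A sketch of the in-place conversion above, assuming PyTorch is installed:

```python
import torch

from transformers import GenerationConfig

d = {"torch_dtype": torch.float16, "nested": {"torch_dtype": torch.bfloat16}}
GenerationConfig().dict_torch_dtype_to_str(d)
print(d)  # {'torch_dtype': 'float16', 'nested': {'torch_dtype': 'bfloat16'}}
```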
946
+ def to_diff_dict(self) -> Dict[str, Any]:
947
+ """
948
+ Removes all attributes from config which correspond to the default config attributes for better readability and
949
+ serializes to a Python dictionary.
950
+
951
+ Returns:
952
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
953
+ """
954
+ config_dict = self.to_dict()
955
+
956
+ # get the default config dict
957
+ default_config_dict = GenerationConfig().to_dict()
958
+
959
+ serializable_config_dict = {}
960
+
961
+ # only serialize values that differ from the default config
962
+ for key, value in config_dict.items():
963
+ if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]:
964
+ serializable_config_dict[key] = value
965
+
966
+ self.dict_torch_dtype_to_str(serializable_config_dict)
967
+ return serializable_config_dict
968
+
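A sketch of the filtering above: only fields that differ from a fresh `GenerationConfig()`, plus `transformers_version`, survive.

```python
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True, temperature=0.7)
print(sorted(config.to_diff_dict().keys()))
# ['do_sample', 'temperature', 'transformers_version']
```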
969
+ def to_dict(self) -> Dict[str, Any]:
970
+ """
971
+ Serializes this instance to a Python dictionary.
972
+
973
+ Returns:
974
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
975
+ """
976
+ output = copy.deepcopy(self.__dict__)
977
+
978
+ # Fields to ignore at serialization time
979
+ if "_commit_hash" in output:
980
+ del output["_commit_hash"]
981
+ if "_original_object_hash" in output:
982
+ del output["_original_object_hash"]
983
+
984
+ # Transformers version when serializing this file
985
+ output["transformers_version"] = __version__
986
+
987
+ self.dict_torch_dtype_to_str(output)
988
+ return output
989
+
990
+ def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str:
991
+ """
992
+ Serializes this instance to a JSON string.
993
+
994
+ Args:
995
+ use_diff (`bool`, *optional*, defaults to `True`):
996
+ If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
997
+ is serialized to JSON string.
998
+ ignore_metadata (`bool`, *optional*, defaults to `False`):
999
+ Whether to ignore the metadata fields present in the instance
1000
+
1001
+ Returns:
1002
+ `str`: String containing all the attributes that make up this configuration instance in JSON format.
1003
+ """
1004
+ if use_diff is True:
1005
+ config_dict = self.to_diff_dict()
1006
+ else:
1007
+ config_dict = self.to_dict()
1008
+
1009
+ if ignore_metadata:
1010
+ for metadata_field in METADATA_FIELDS:
1011
+ config_dict.pop(metadata_field, None)
1012
+
1013
+ def convert_keys_to_string(obj):
1014
+ if isinstance(obj, dict):
1015
+ return {str(key): convert_keys_to_string(value) for key, value in obj.items()}
1016
+ elif isinstance(obj, list):
1017
+ return [convert_keys_to_string(item) for item in obj]
1018
+ else:
1019
+ return obj
1020
+
1021
+ config_dict = convert_keys_to_string(config_dict)
1022
+
1023
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
1024
+
1025
+ def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
1026
+ """
1027
+ Save this instance to a JSON file.
1028
+
1029
+ Args:
1030
+ json_file_path (`str` or `os.PathLike`):
1031
+ Path to the JSON file in which this configuration instance's parameters will be saved.
1032
+ use_diff (`bool`, *optional*, defaults to `True`):
1033
+ If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
1034
+ is serialized to JSON file.
1035
+ """
1036
+ with open(json_file_path, "w", encoding="utf-8") as writer:
1037
+ writer.write(self.to_json_string(use_diff=use_diff))
1038
+
1039
+ @classmethod
1040
+ def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig":
1041
+ """
1042
+ Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy
1043
+ [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].
1044
+
1045
+ Args:
1046
+ model_config (`PretrainedConfig`):
1047
+ The model config that will be used to instantiate the generation config.
1048
+
1049
+ Returns:
1050
+ [`GenerationConfig`]: The configuration object instantiated from those parameters.
1051
+ """
1052
+ config_dict = model_config.to_dict()
1053
+ config_dict.pop("_from_model_config", None)
1054
+ config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
1055
+
1056
+ # Special case: some models have generation attributes set in the decoder. Use them if still unset in the
1057
+ # generation config.
1058
+ for decoder_name in ("decoder", "generator", "text_config"):
1059
+ if decoder_name in config_dict:
1060
+ default_generation_config = GenerationConfig()
1061
+ decoder_config = config_dict[decoder_name]
1062
+ for attr in config.to_dict().keys():
1063
+ if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr):
1064
+ setattr(config, attr, decoder_config[attr])
1065
+
1066
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
1067
+ return config
1068
+
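A sketch of converting a legacy model config into a standalone generation config; the checkpoint name follows the docstring examples above.

```python
from transformers import AutoConfig, GenerationConfig

model_config = AutoConfig.from_pretrained("openai-community/gpt2")
generation_config = GenerationConfig.from_model_config(model_config)

# Generation-related fields such as the special token ids are carried over.
print(generation_config.bos_token_id, generation_config.eos_token_id)
```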
1069
+ def update(self, **kwargs):
1070
+ """
1071
+ Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
1072
+ returning all the unused kwargs.
1073
+
1074
+ Args:
1075
+ kwargs (`Dict[str, Any]`):
1076
+ Dictionary of attributes to tentatively update this class.
1077
+
1078
+ Returns:
1079
+ `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
1080
+ """
1081
+ to_remove = []
1082
+ for key, value in kwargs.items():
1083
+ if hasattr(self, key):
1084
+ setattr(self, key, value)
1085
+ to_remove.append(key)
1086
+
1087
+ # Confirm that the updated instance is still valid
1088
+ self.validate()
1089
+
1090
+ # Remove all the attributes that were updated, without modifying the input dict
1091
+ unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
1092
+ return unused_kwargs
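A sketch of the contract above: known attributes are updated and re-validated, unknown keys are handed back untouched.

```python
from transformers import GenerationConfig

config = GenerationConfig(do_sample=True)
unused = config.update(temperature=0.7, top_p=0.9, not_a_generation_flag=1)

print(config.temperature, config.top_p)  # 0.7 0.9
print(unused)                            # {'not_a_generation_flag': 1}
```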
env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py ADDED
@@ -0,0 +1,457 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+
18
+ import jax
19
+ import jax.lax as lax
20
+ import jax.numpy as jnp
21
+
22
+ from ..utils import add_start_docstrings
23
+ from ..utils.logging import get_logger
24
+
25
+
26
+ logger = get_logger(__name__)
27
+
28
+
29
+ LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
30
+ Args:
31
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
32
+ Indices of input sequence tokens in the vocabulary.
33
+
34
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
35
+ [`PreTrainedTokenizer.__call__`] for details.
36
+
37
+ [What are input IDs?](../glossary#input-ids)
38
+ scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
39
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
40
+ beam search or log softmax for each vocabulary token when using beam search.
41
+ kwargs (`Dict[str, Any]`, *optional*):
42
+ Additional logits processor specific kwargs.
43
+
44
+ Return:
45
+ `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
46
+
47
+ """
48
+
49
+
50
+ class FlaxLogitsProcessor:
51
+ """Abstract base class for all logit processors that can be applied during generation."""
52
+
53
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
54
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
55
+ """Flax method for processing logits."""
56
+ raise NotImplementedError(
57
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
58
+ )
59
+
60
+
61
+ class FlaxLogitsWarper:
62
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
63
+
64
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
65
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
66
+ """Flax method for warping logits."""
67
+ raise NotImplementedError(
68
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
69
+ )
70
+
71
+
72
+ class FlaxLogitsProcessorList(list):
73
+ """
74
+ This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process
75
+ a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
76
+ [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs.
77
+ """
78
+
79
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
80
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
81
+ for processor in self:
82
+ function_args = inspect.signature(processor.__call__).parameters
83
+ if len(function_args) > 3:
84
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
85
+ raise ValueError(
86
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
87
+ f"{processor.__class__} are passed to the logits processor."
88
+ )
89
+ scores = processor(input_ids, scores, cur_len, **kwargs)
90
+ else:
91
+ scores = processor(input_ids, scores, cur_len)
92
+ return scores
93
+
94
+
95
+ class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
96
+ r"""
97
+ [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).
98
+
99
+ Args:
100
+ temperature (`float`):
101
+ The value used to modulate the logits distribution.
102
+ """
103
+
104
+ def __init__(self, temperature: float):
105
+ if not isinstance(temperature, float) or not (temperature > 0):
106
+ raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
107
+
108
+ self.temperature = temperature
109
+
110
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
111
+ scores = scores / self.temperature
112
+ return scores
113
+
114
+
115
+ class FlaxTopPLogitsWarper(FlaxLogitsWarper):
116
+ """
117
+ [`FlaxLogitsWarper`] that performs top-p filtering, i.e. restricting to the smallest set of top tokens whose cumulative probability adds up to `top_p` or higher.
118
+
119
+ Args:
120
+ top_p (`float`):
121
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
122
+ higher are kept for generation.
123
+ filter_value (`float`, *optional*, defaults to -inf):
124
+ All filtered values will be set to this float value.
125
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
126
+ Minimum number of tokens that cannot be filtered.
127
+ """
128
+
129
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
130
+ if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
131
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
132
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
133
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
134
+
135
+ self.top_p = top_p
136
+ self.filter_value = filter_value
137
+ self.min_tokens_to_keep = min_tokens_to_keep
138
+
139
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
140
+ topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
141
+
142
+ mask_scores = jnp.full_like(scores, self.filter_value)
143
+ cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
144
+ score_mask = cumulative_probs < self.top_p
145
+
146
+ # include the token that is higher than top_p as well
147
+ score_mask = jnp.roll(score_mask, 1)
148
+ score_mask |= score_mask.at[:, 0].set(True)
149
+
150
+ # min tokens to keep
151
+ score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
152
+
153
+ topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
154
+ next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
155
+
156
+ return next_scores
157
+
158
+
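A small sketch of the masking behaviour, assuming a toy 4-token distribution in which the two most likely tokens cover 90% of the mass:

import jax
import jax.numpy as jnp
from transformers.generation.flax_logits_process import FlaxTopPLogitsWarper

scores = jnp.log(jnp.array([[0.5, 0.4, 0.05, 0.05]]))
warper = FlaxTopPLogitsWarper(top_p=0.8)
filtered = warper(jnp.array([[0]]), scores, cur_len=1)
# Tokens 0 and 1 are kept (their cumulative probability first reaches 0.8); the rest become -inf.
print(jax.nn.softmax(filtered))  # ~[0.556, 0.444, 0.0, 0.0]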
159
+ class FlaxTopKLogitsWarper(FlaxLogitsWarper):
160
+ r"""
161
+ [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
162
+
163
+ Args:
164
+ top_k (`int`):
165
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
166
+ filter_value (`float`, *optional*, defaults to -inf):
167
+ All filtered values will be set to this float value.
168
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
169
+ Minimum number of tokens that cannot be filtered.
170
+ """
171
+
172
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
173
+ if not isinstance(top_k, int) or top_k <= 0:
174
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
175
+
176
+ self.top_k = max(top_k, min_tokens_to_keep)
177
+ self.filter_value = filter_value
178
+
179
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
180
+ batch_size, vocab_size = scores.shape
181
+ next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
182
+
183
+ topk = min(self.top_k, scores.shape[-1]) # Safety check
184
+ topk_scores, topk_indices = lax.top_k(scores, topk)
185
+ shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
186
+ topk_scores_flat = topk_scores.flatten()
187
+ topk_indices_flat = topk_indices.flatten() + shift
188
+
189
+ next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
190
+ next_scores = next_scores_flat.reshape(batch_size, vocab_size)
191
+ return next_scores
192
+
193
+
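A small sketch, assuming a toy (1, 6) score row; only the three largest logits survive the filter:

import jax.numpy as jnp
from transformers.generation.flax_logits_process import FlaxTopKLogitsWarper

scores = jnp.array([[0.1, 2.0, -1.0, 3.0, 0.5, 1.5]])
warper = FlaxTopKLogitsWarper(top_k=3)
filtered = warper(jnp.array([[0]]), scores, cur_len=1)
print(filtered)  # columns holding 3.0, 2.0 and 1.5 keep their values, all other columns become -inf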
194
+ class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
195
+ r"""
196
+ [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
197
+
198
+ Args:
199
+ bos_token_id (`int`):
200
+ The id of the token to force as the first generated token.
201
+ """
202
+
203
+ def __init__(self, bos_token_id: int):
204
+ self.bos_token_id = bos_token_id
205
+
206
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
207
+ new_scores = jnp.full(scores.shape, -float("inf"))
208
+
209
+ apply_penalty = 1 - jnp.bool_(cur_len - 1)
210
+
211
+ scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
212
+
213
+ return scores
214
+
215
+
216
+ class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
217
+ r"""
218
+ [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
219
+
220
+ Args:
221
+ max_length (`int`):
222
+ The maximum length of the sequence to be generated.
223
+ eos_token_id (`int`):
224
+ The id of the token to force as the last generated token when `max_length` is reached.
225
+ """
226
+
227
+ def __init__(self, max_length: int, eos_token_id: int):
228
+ self.max_length = max_length
229
+ self.eos_token_id = eos_token_id
230
+
231
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
232
+ new_scores = jnp.full(scores.shape, -float("inf"))
233
+
234
+ apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
235
+
236
+ scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
237
+
238
+ return scores
239
+
240
+
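A minimal sketch of both forcing processors on a toy 4-token vocabulary (the ids 0 and 3 used as BOS/EOS are assumptions of this example):

import jax.numpy as jnp
from transformers.generation.flax_logits_process import (
    FlaxForcedBOSTokenLogitsProcessor,
    FlaxForcedEOSTokenLogitsProcessor,
)

ids = jnp.array([[2]])
scores = jnp.zeros((1, 4))
force_bos = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=0)
force_eos = FlaxForcedEOSTokenLogitsProcessor(max_length=5, eos_token_id=3)
print(force_bos(ids, scores, cur_len=1))  # only column 0 keeps a finite score (first generated token)
print(force_eos(ids, scores, cur_len=4))  # only column 3 keeps a finite score (final position before max_length)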
241
+ class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
242
+ r"""
243
+ [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
244
+
245
+ Args:
246
+ min_length (`int`):
247
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
248
+ eos_token_id (`int`):
249
+ The id of the *end-of-sequence* token.
250
+ """
251
+
252
+ def __init__(self, min_length: int, eos_token_id: int):
253
+ if not isinstance(min_length, int) or min_length < 0:
254
+ raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
255
+
256
+ if not isinstance(eos_token_id, int) or eos_token_id < 0:
257
+ raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
258
+
259
+ self.min_length = min_length
260
+ self.eos_token_id = eos_token_id
261
+
262
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
263
+ # create boolean flag to decide if min length penalty should be applied
264
+ apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
265
+
266
+ scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
267
+
268
+ return scores
269
+
270
+
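A minimal sketch, assuming `eos_token_id=3` on a toy (1, 4) score row:

import jax.numpy as jnp
from transformers.generation.flax_logits_process import FlaxMinLengthLogitsProcessor

processor = FlaxMinLengthLogitsProcessor(min_length=5, eos_token_id=3)
scores = jnp.zeros((1, 4))
print(processor(jnp.array([[0]]), scores, cur_len=2))  # EOS column set to -inf while cur_len < min_length
print(processor(jnp.array([[0]]), scores, cur_len=6))  # unchanged once the minimum length is reached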
271
+ class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
272
+ r"""
273
+ [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using
274
+ `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the
275
+ beginning of the generation.
276
+
277
+ Args:
278
+ begin_suppress_tokens (`List[int]`):
279
+ Tokens to not sample.
280
+ begin_index (`int`):
281
+ Index where the tokens are suppressed.
282
+ """
283
+
284
+ def __init__(self, begin_suppress_tokens, begin_index):
285
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
286
+ self.begin_index = begin_index
287
+
288
+ def __call__(self, input_ids, scores, cur_len: int):
289
+ apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
290
+
291
+ scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
292
+
293
+ return scores
294
+
295
+
296
+ class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
297
+ r"""
298
+ [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
299
+ to be `-inf` so they are not sampled.
300
+
301
+ Args:
302
+ suppress_tokens (`list`):
303
+ Tokens to not sample.
304
+ """
305
+
306
+ def __init__(self, suppress_tokens: list):
307
+ self.suppress_tokens = list(suppress_tokens)
308
+
309
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
310
+ scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
311
+
312
+ return scores
313
+
314
+
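A minimal sketch of both suppression processors, assuming token ids 1 and 2 must never be sampled and token 0 is only blocked at the first generation step (`begin_index=1`):

import jax.numpy as jnp
from transformers.generation.flax_logits_process import (
    FlaxSuppressTokensAtBeginLogitsProcessor,
    FlaxSuppressTokensLogitsProcessor,
)

ids = jnp.array([[0]])
scores = jnp.zeros((1, 4))
always = FlaxSuppressTokensLogitsProcessor(suppress_tokens=[1, 2])
at_begin = FlaxSuppressTokensAtBeginLogitsProcessor(begin_suppress_tokens=[0], begin_index=1)
scores = always(ids, scores, cur_len=1)    # columns 1 and 2 -> -inf at every step
scores = at_begin(ids, scores, cur_len=1)  # column 0 -> -inf only while cur_len == begin_index
print(scores)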
315
+ class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
316
+ r"""
317
+ [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to
318
+ token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens
319
+ to `-inf` so that they are sampled at their corresponding index.
320
+
321
+ Args:
322
+ force_token_map (`list`):
323
+ Map giving token ids and indices where they will be forced to be sampled.
324
+ """
325
+
326
+ def __init__(self, force_token_map):
327
+ force_token_map = dict(force_token_map)
328
+ # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
329
+ # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
330
+ # Indexes without forced tokens will have a negative value.
331
+ force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
332
+ for index, token in force_token_map.items():
333
+ if token is not None:
334
+ force_token_array = force_token_array.at[index].set(token)
335
+ self.force_token_array = jnp.int32(force_token_array)
336
+
337
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
338
+ def _force_token(generation_idx):
339
+ batch_size = scores.shape[0]
340
+ current_token = self.force_token_array[generation_idx]
341
+
342
+ new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
343
+ updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
344
+ new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
345
+ return new_scores
346
+
347
+ scores = lax.cond(
348
+ cur_len >= self.force_token_array.shape[0],
349
+ # If the current length is greater than or equal to the length of force_token_array, the processor does nothing.
350
+ lambda: scores,
351
+ # Otherwise, it may force a certain token.
352
+ lambda: lax.cond(
353
+ self.force_token_array[cur_len] >= 0,
354
+ # Only valid (positive) tokens are forced
355
+ lambda: _force_token(cur_len),
356
+ # Otherwise, the processor does nothing.
357
+ lambda: scores,
358
+ ),
359
+ )
360
+ return scores
361
+
362
+
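A minimal sketch with a Whisper-style forced-token map (the map `{1: 7, 2: 9}` and the vocab size of 12 are purely illustrative):

import jax.numpy as jnp
from transformers.generation.flax_logits_process import FlaxForceTokensLogitsProcessor

processor = FlaxForceTokensLogitsProcessor(force_token_map=[[1, 7], [2, 9]])
scores = jnp.zeros((1, 12))
print(processor(jnp.array([[0]]), scores, cur_len=1).argmax(-1))  # [7] -> token 7 is forced at step 1
print(processor(jnp.array([[0]]), scores, cur_len=2).argmax(-1))  # [9] -> token 9 is forced at step 2
print(processor(jnp.array([[0]]), scores, cur_len=3))             # beyond the map: scores are untouched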
363
+ class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
364
+ r"""
365
+ Whisper-specific processor that enforces Whisper's timestamp rules on the scores: it suppresses the `"<|notimestamps|>"` token,
366
+ keeps timestamp tokens to valid (paired) positions, caps the initial timestamp, and forces a timestamp whenever the total timestamp probability outweighs every individual text token.
367
+
368
+ Args:
369
+ generate_config (`GenerateConfig`):
370
+ The generate config used to generate the output. The following parameters are required:
371
+ eos_token_id (`int`, *optional*, defaults to 50257):
372
+ The id of the *end-of-sequence* token.
373
+ no_timestamps_token_id (`int`, *optional*, defaults to 50363):
374
+ The id of the `"<|notimestamps|>"` token.
375
+ max_initial_timestamp_index (`int`, *optional*, defaults to 1):
376
+ Used to set the maximum value of the initial timestamp. This is used to prevent the model from
377
+ predicting timestamps that are too far in the future.
378
+ """
379
+
380
+ def __init__(self, generate_config, model_config, decoder_input_length):
381
+ self.eos_token_id = generate_config.eos_token_id
382
+ self.no_timestamps_token_id = generate_config.no_timestamps_token_id
383
+ self.timestamp_begin = generate_config.no_timestamps_token_id + 1
384
+
385
+ self.begin_index = decoder_input_length + 1
386
+
387
+ if generate_config.is_multilingual:
388
+ # room for language token and task token
389
+ self.begin_index += 2
390
+ if hasattr(generate_config, "max_initial_timestamp_index"):
391
+ self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
392
+ else:
393
+ self.max_initial_timestamp_index = model_config.vocab_size
394
+ if self.max_initial_timestamp_index is None:
395
+ self.max_initial_timestamp_index = model_config.vocab_size
396
+
397
+ def __call__(self, input_ids, scores, cur_len):
398
+ # suppress <|notimestamps|> which is handled by without_timestamps
399
+ scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
400
+
401
+ def handle_pairs(input_ids_k, scores_k):
402
+ last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
403
+ last_was_timestamp = jnp.where(
404
+ input_ids_k[cur_len - 1] >= self.timestamp_begin,
405
+ True and last_was_timestamp,
406
+ False,
407
+ )
408
+
409
+ penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
410
+ penultimate_was_timestamp = jnp.where(
411
+ input_ids_k[cur_len - 2] >= self.timestamp_begin,
412
+ True,
413
+ penultimate_was_timestamp,
414
+ )
415
+
416
+ return jnp.where(
417
+ last_was_timestamp,
418
+ jnp.where(
419
+ penultimate_was_timestamp > 0,
420
+ scores_k.at[self.timestamp_begin :].set(-float("inf")),
421
+ scores_k.at[: self.eos_token_id].set(-float("inf")),
422
+ ),
423
+ scores_k,
424
+ )
425
+
426
+ scores = jax.vmap(handle_pairs)(input_ids, scores)
427
+
428
+ apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
429
+ apply_max_initial_timestamp = jnp.where(
430
+ self.max_initial_timestamp_index is not None,
431
+ True and apply_max_initial_timestamp,
432
+ False,
433
+ )
434
+
435
+ last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
436
+
437
+ scores = jnp.where(
438
+ apply_max_initial_timestamp,
439
+ scores.at[:, last_allowed + 1 :].set(-float("inf")),
440
+ scores,
441
+ )
442
+
443
+ # if sum of probability over timestamps is above any other token, sample timestamp
444
+ logprobs = jax.nn.log_softmax(scores, axis=-1)
445
+
446
+ def handle_cumulative_probs(logprobs_k, scores_k):
447
+ timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
448
+ max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
449
+ return jnp.where(
450
+ timestamp_logprob > max_text_token_logprob,
451
+ scores_k.at[: self.timestamp_begin].set(-float("inf")),
452
+ scores_k,
453
+ )
454
+
455
+ scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
456
+
457
+ return scores
env-llmeval/lib/python3.10/site-packages/transformers/generation/flax_utils.py ADDED
@@ -0,0 +1,1019 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ import copy
19
+ import inspect
20
+ import warnings
21
+ from functools import partial
22
+ from typing import Any, Dict, Optional, Union
23
+
24
+ import flax
25
+ import jax
26
+ import jax.numpy as jnp
27
+ import numpy as np
28
+ from jax import lax
29
+
30
+ from ..models.auto import (
31
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
32
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
33
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
34
+ )
35
+ from ..utils import ModelOutput, logging
36
+ from .configuration_utils import GenerationConfig
37
+ from .flax_logits_process import (
38
+ FlaxForcedBOSTokenLogitsProcessor,
39
+ FlaxForcedEOSTokenLogitsProcessor,
40
+ FlaxForceTokensLogitsProcessor,
41
+ FlaxLogitsProcessorList,
42
+ FlaxMinLengthLogitsProcessor,
43
+ FlaxSuppressTokensAtBeginLogitsProcessor,
44
+ FlaxSuppressTokensLogitsProcessor,
45
+ FlaxTemperatureLogitsWarper,
46
+ FlaxTopKLogitsWarper,
47
+ FlaxTopPLogitsWarper,
48
+ )
49
+
50
+
51
+ logger = logging.get_logger(__name__)
52
+
53
+
54
+ @flax.struct.dataclass
55
+ class FlaxGreedySearchOutput(ModelOutput):
56
+ """
57
+ Flax Base class for outputs of decoder-only generation models using greedy search.
58
+
59
+
60
+ Args:
61
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
62
+ The generated sequences.
63
+ """
64
+
65
+ sequences: jnp.ndarray = None
66
+
67
+
68
+ @flax.struct.dataclass
69
+ class FlaxSampleOutput(ModelOutput):
70
+ """
71
+ Flax Base class for outputs of decoder-only generation models using sampling.
72
+
73
+
74
+ Args:
75
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
76
+ The generated sequences.
77
+ """
78
+
79
+ sequences: jnp.ndarray = None
80
+
81
+
82
+ @flax.struct.dataclass
83
+ class FlaxBeamSearchOutput(ModelOutput):
84
+ """
85
+ Flax Base class for outputs of decoder-only generation models using beam search.
86
+
87
+
88
+ Args:
89
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
90
+ The generated sequences.
91
+ scores (`jnp.ndarray` of shape `(batch_size,)`):
92
+ The scores (log probabilities) of the generated sequences.
93
+ """
94
+
95
+ sequences: jnp.ndarray = None
96
+ scores: jnp.ndarray = None
97
+
98
+
99
+ @flax.struct.dataclass
100
+ class GreedyState:
101
+ cur_len: jnp.ndarray
102
+ sequences: jnp.ndarray
103
+ running_token: jnp.ndarray
104
+ is_sent_finished: jnp.ndarray
105
+ model_kwargs: Dict[str, jnp.ndarray]
106
+
107
+
108
+ @flax.struct.dataclass
109
+ class SampleState:
110
+ cur_len: jnp.ndarray
111
+ sequences: jnp.ndarray
112
+ running_token: jnp.ndarray
113
+ is_sent_finished: jnp.ndarray
114
+ prng_key: jnp.ndarray
115
+ model_kwargs: Dict[str, jnp.ndarray]
116
+
117
+
118
+ @flax.struct.dataclass
119
+ class BeamSearchState:
120
+ cur_len: jnp.ndarray
121
+ running_sequences: jnp.ndarray
122
+ running_scores: jnp.ndarray
123
+ sequences: jnp.ndarray
124
+ scores: jnp.ndarray
125
+ is_sent_finished: jnp.ndarray
126
+ model_kwargs: Dict[str, jnp.ndarray]
127
+
128
+
129
+ class FlaxGenerationMixin:
130
+ """
131
+ A class containing all functions for auto-regressive text generation, to be used as a mixin in
132
+ [`FlaxPreTrainedModel`].
133
+
134
+ The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for:
135
+ - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and
136
+ `do_sample=False`
137
+ - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and
138
+ `do_sample=True`
139
+ - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and
140
+ `do_sample=False`
141
+
142
+ You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To
143
+ learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
144
+ """
145
+
146
+ def prepare_inputs_for_generation(self, *args, **kwargs):
147
+ raise NotImplementedError(
148
+ "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
149
+ )
150
+
151
+ @staticmethod
152
+ def _run_loop_in_debug(cond_fn, body_fn, init_state):
153
+ """
154
+ Run generation in untraced mode. This should only be used for debugging purposes.
155
+ """
156
+ state = init_state
157
+ while cond_fn(state):
158
+ state = body_fn(state)
159
+ return state
160
+
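A tiny sketch of the contract: the same `cond_fn`/`body_fn` pair that would normally go to `lax.while_loop` is executed eagerly, so intermediate state stays inspectable (the toy counter below is purely illustrative):

from transformers.generation.flax_utils import FlaxGenerationMixin

cond_fn = lambda state: state < 3   # keep looping while the condition holds
body_fn = lambda state: state + 1   # one update step
assert FlaxGenerationMixin._run_loop_in_debug(cond_fn, body_fn, 0) == 3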
161
+ def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs):
162
+ encoder_kwargs = {
163
+ argument: value
164
+ for argument, value in model_kwargs.items()
165
+ if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
166
+ }
167
+ model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs)
168
+ return model_kwargs
169
+
170
+ def _prepare_decoder_input_ids_for_generation(
171
+ self,
172
+ batch_size: int,
173
+ decoder_start_token_id: int = None,
174
+ bos_token_id: int = None,
175
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
176
+ ) -> jnp.ndarray:
177
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
178
+ # Only use this arg if not None, otherwise just remove from model_kwargs
179
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
180
+ if decoder_input_ids is not None:
181
+ return decoder_input_ids
182
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
183
+ return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0)
184
+
185
+ def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
186
+ # retrieve decoder_start_token_id for encoder-decoder models
187
+ # fall back to bos_token_id if necessary
188
+ decoder_start_token_id = (
189
+ decoder_start_token_id
190
+ if decoder_start_token_id is not None
191
+ else self.generation_config.decoder_start_token_id
192
+ )
193
+ bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
194
+ if decoder_start_token_id is not None:
195
+ return decoder_start_token_id
196
+ elif (
197
+ hasattr(self.config, "decoder")
198
+ and hasattr(self.config.decoder, "decoder_start_token_id")
199
+ and self.config.decoder.decoder_start_token_id is not None
200
+ ):
201
+ return self.config.decoder.decoder_start_token_id
202
+ elif bos_token_id is not None:
203
+ return bos_token_id
204
+ elif (
205
+ hasattr(self.config, "decoder")
206
+ and hasattr(self.config.decoder, "bos_token_id")
207
+ and self.config.decoder.bos_token_id is not None
208
+ ):
209
+ return self.config.decoder.bos_token_id
210
+ raise ValueError(
211
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
212
+ )
213
+
214
+ @staticmethod
215
+ def _expand_to_num_beams(tensor, num_beams):
216
+ return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
217
+
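A shape-only sketch: each batch item is repeated along a new beam axis (the sizes below are arbitrary):

import jax.numpy as jnp
from transformers.generation.flax_utils import FlaxGenerationMixin

tensor = jnp.arange(6).reshape(2, 3)  # (batch_size=2, seq_len=3)
expanded = FlaxGenerationMixin._expand_to_num_beams(tensor, num_beams=4)
assert expanded.shape == (2, 4, 3)    # every row is duplicated across the 4 beams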
218
+ def _adapt_logits_for_beam_search(self, logits):
219
+ """
220
+ This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam
221
+ search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
222
+ """
223
+ return logits
224
+
225
+ def _validate_model_class(self):
226
+ """
227
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
228
+ right class to use.
229
+ """
230
+ if not self.can_generate():
231
+ generate_compatible_mappings = [
232
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
233
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
234
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
235
+ ]
236
+ generate_compatible_classes = set()
237
+ for model_mapping in generate_compatible_mappings:
238
+ supported_models = model_mapping.get(type(self.config), default=None)
239
+ if supported_models is not None:
240
+ generate_compatible_classes.add(supported_models.__name__)
241
+ exception_message = (
242
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
243
+ "it doesn't have a language model head."
244
+ )
245
+ if generate_compatible_classes:
246
+ exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
247
+ raise TypeError(exception_message)
248
+
249
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
250
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
251
+ unused_model_args = []
252
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
253
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
254
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
255
+ if "kwargs" in model_args or "model_kwargs" in model_args:
256
+ model_args |= set(inspect.signature(self.__call__).parameters)
257
+ for key, value in model_kwargs.items():
258
+ if value is not None and key not in model_args:
259
+ unused_model_args.append(key)
260
+
261
+ if unused_model_args:
262
+ raise ValueError(
263
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
264
+ " generate arguments will also show up in this list)"
265
+ )
266
+
267
+ def generate(
268
+ self,
269
+ input_ids: jnp.ndarray,
270
+ generation_config: Optional[GenerationConfig] = None,
271
+ prng_key: Optional[jnp.ndarray] = None,
272
+ trace: bool = True,
273
+ params: Optional[Dict[str, jnp.ndarray]] = None,
274
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
275
+ **kwargs,
276
+ ):
277
+ r"""
278
+ Generates sequences of token ids for models with a language modeling head.
279
+
280
+ Parameters:
281
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
282
+ The sequence used as a prompt for the generation.
283
+ generation_config (`~generation.GenerationConfig`, *optional*):
284
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
285
+ passed to generate matching the attributes of `generation_config` will override them. If
286
+ `generation_config` is not provided, the default will be used, which has the following loading
287
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
288
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
289
+ default values, whose documentation should be checked to parameterize generation.
290
+ trace (`bool`, *optional*, defaults to `True`):
291
+ Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a
292
+ considerably slower runtime.
293
+ params (`Dict[str, jnp.ndarray]`, *optional*):
294
+ Optionally the model parameters can be passed. Can be useful for parallelized generation.
295
+ logits_processor (`FlaxLogitsProcessorList`, *optional*):
296
+ Custom logits processors that complement the default logits processors built from arguments and
297
+ generation config. If a logit processor is passed that is already created with the arguments or a
298
+ generation config an error is thrown. This feature is intended for advanced users.
299
+ kwargs (`Dict[str, Any]`, *optional*):
300
+ Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
301
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
302
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
303
+
304
+ Return:
305
+ [`~utils.ModelOutput`].
306
+
307
+ """
308
+ # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
309
+ self._validate_model_class()
310
+
311
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
312
+ if generation_config is None:
313
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
314
+ # two conditions must be met
315
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
316
+ # 2) the generation config must have seen no modification since its creation (the hash is the same).
317
+ if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
318
+ self.generation_config
319
+ ):
320
+ new_generation_config = GenerationConfig.from_model_config(self.config)
321
+ if new_generation_config != self.generation_config:
322
+ warnings.warn(
323
+ "You have modified the pretrained model configuration to control generation. This is a"
324
+ " deprecated strategy to control generation and will be removed soon, in a future version."
325
+ " Please use and modify the model generation configuration (see"
326
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
327
+ )
328
+ self.generation_config = new_generation_config
329
+ generation_config = self.generation_config
330
+
331
+ generation_config = copy.deepcopy(generation_config)
332
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
333
+ self._validate_model_kwargs(model_kwargs.copy())
334
+
335
+ logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList()
336
+
337
+ # set init values
338
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
339
+
340
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
341
+ if model_kwargs.get("attention_mask") is None:
342
+ logger.warning(
343
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
344
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
345
+ )
346
+ eos_token_id = generation_config.eos_token_id
347
+ if isinstance(eos_token_id, list):
348
+ eos_token_id = eos_token_id[0]
349
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
350
+ generation_config.pad_token_id = eos_token_id
351
+
352
+ if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
353
+ raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
354
+
355
+ # decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
356
+ if not self.config.is_encoder_decoder and not trace:
357
+ if (
358
+ generation_config.pad_token_id is not None
359
+ and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
360
+ ):
361
+ logger.warning(
362
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
363
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
364
+ )
365
+
366
+ batch_size = input_ids.shape[0]
367
+
368
+ if self.config.is_encoder_decoder:
369
+ # add encoder_outputs to model_kwargs
370
+ if model_kwargs.get("encoder_outputs") is None:
371
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
372
+ # prepare decoder_input_ids for generation
373
+ input_ids = self._prepare_decoder_input_ids_for_generation(
374
+ batch_size,
375
+ decoder_start_token_id=generation_config.decoder_start_token_id,
376
+ bos_token_id=generation_config.bos_token_id,
377
+ model_kwargs=model_kwargs,
378
+ )
379
+
380
+ # Prepare `max_length` depending on other stopping criteria.
381
+ input_ids_seq_length = input_ids.shape[-1]
382
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
383
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
384
+ # 20 is the default max_length of the generation config
385
+ warnings.warn(
386
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
387
+ "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.",
388
+ UserWarning,
389
+ )
390
+ elif generation_config.max_new_tokens is not None:
391
+ if not has_default_max_length and generation_config.max_length is not None:
392
+ logger.warning(
393
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
394
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
395
+ "Please refer to the documentation for more information. "
396
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
397
+ )
398
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
399
+
400
+ if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
401
+ raise ValueError(
402
+ f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger than"
403
+ f" the maximum length ({generation_config.max_length})"
404
+ )
405
+ if input_ids_seq_length >= generation_config.max_length:
406
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
407
+ logger.warning(
408
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
409
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
410
+ " increasing `max_new_tokens`."
411
+ )
412
+
413
+ logits_processor = self._get_logits_processor(
414
+ generation_config=generation_config,
415
+ input_ids_seq_length=input_ids_seq_length,
416
+ logits_processor=logits_processor,
417
+ )
418
+
419
+ if not generation_config.do_sample and generation_config.num_beams == 1:
420
+ return self._greedy_search(
421
+ input_ids,
422
+ generation_config.max_length,
423
+ generation_config.pad_token_id,
424
+ generation_config.eos_token_id,
425
+ logits_processor=logits_processor,
426
+ trace=trace,
427
+ params=params,
428
+ model_kwargs=model_kwargs,
429
+ )
430
+ elif generation_config.do_sample and generation_config.num_beams == 1:
431
+ logits_warper = self._get_logits_warper(generation_config=generation_config)
432
+ return self._sample(
433
+ input_ids,
434
+ generation_config.max_length,
435
+ generation_config.pad_token_id,
436
+ generation_config.eos_token_id,
437
+ prng_key,
438
+ logits_warper=logits_warper,
439
+ logits_processor=logits_processor,
440
+ trace=trace,
441
+ params=params,
442
+ model_kwargs=model_kwargs,
443
+ )
444
+ elif not generation_config.do_sample and generation_config.num_beams > 1:
445
+ # broadcast input_ids & encoder_outputs
446
+ input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)
447
+
448
+ if "encoder_outputs" in model_kwargs:
449
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
450
+ model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
451
+ )
452
+
453
+ for kwarg in ["attention_mask", "decoder_attention_mask"]:
454
+ if kwarg in model_kwargs:
455
+ model_kwargs[kwarg] = self._expand_to_num_beams(
456
+ model_kwargs[kwarg], num_beams=generation_config.num_beams
457
+ )
458
+
459
+ return self._beam_search(
460
+ input_ids,
461
+ generation_config.max_length,
462
+ generation_config.pad_token_id,
463
+ generation_config.eos_token_id,
464
+ length_penalty=generation_config.length_penalty,
465
+ early_stopping=generation_config.early_stopping,
466
+ logits_processor=logits_processor,
467
+ trace=trace,
468
+ params=params,
469
+ num_return_sequences=generation_config.num_return_sequences,
470
+ model_kwargs=model_kwargs,
471
+ )
472
+ else:
473
+ raise NotImplementedError("Beam sampling is currently not implemented.")
474
+
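A hedged end-to-end sketch of the dispatch above, assuming a small Flax causal LM checkpoint such as `gpt2` is available (the checkpoint name and generation settings are illustrative, not mandated by this file):

import jax
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
input_ids = tokenizer("Flax generation", return_tensors="np").input_ids

# num_beams=1, do_sample=False -> _greedy_search
greedy = model.generate(input_ids, max_new_tokens=10)
# num_beams=1, do_sample=True -> _sample (pass a PRNG key for reproducible draws)
sampled = model.generate(
    input_ids, do_sample=True, top_k=50, max_new_tokens=10, prng_key=jax.random.PRNGKey(0)
)
print(tokenizer.batch_decode(greedy.sequences, skip_special_tokens=True))
print(tokenizer.batch_decode(sampled.sequences, skip_special_tokens=True))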
475
+ def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
476
+ """
477
+ This method returns a [`FlaxLogitsProcessorList`] object that contains all relevant [`FlaxLogitsWarper`]
478
+ instances used for multinomial sampling.
479
+ """
480
+ warpers = FlaxLogitsProcessorList()
481
+
482
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
483
+ warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
484
+ if generation_config.top_k is not None and generation_config.top_k != 0:
485
+ warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
486
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
487
+ warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
488
+
489
+ return warpers
490
+
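For the settings below, the helper assembles the same chain one could build by hand; a sketch assuming `temperature=0.7`, `top_k=50`, `top_p=0.9` (all differing from their neutral defaults):

from transformers.generation.flax_logits_process import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
    FlaxTopPLogitsWarper,
)

warpers = FlaxLogitsProcessorList(
    [
        FlaxTemperatureLogitsWarper(0.7),
        FlaxTopKLogitsWarper(top_k=50, min_tokens_to_keep=1),
        FlaxTopPLogitsWarper(top_p=0.9, min_tokens_to_keep=1),
    ]
)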
491
+ def _get_logits_processor(
492
+ self,
493
+ generation_config: GenerationConfig,
494
+ input_ids_seq_length: int,
495
+ logits_processor: Optional[FlaxLogitsProcessorList],
496
+ ) -> FlaxLogitsProcessorList:
497
+ """
498
+ This method returns a [`FlaxLogitsProcessorList`] object that contains all relevant [`FlaxLogitsProcessor`]
499
+ instances used to modify the scores of the language model head.
500
+ """
501
+ processors = FlaxLogitsProcessorList()
502
+
503
+ if (
504
+ generation_config.min_length is not None
505
+ and generation_config.eos_token_id is not None
506
+ and generation_config.min_length > -1
507
+ ):
508
+ processors.append(
509
+ FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)
510
+ )
511
+ if generation_config.forced_bos_token_id is not None:
512
+ processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
513
+ if generation_config.forced_eos_token_id is not None:
514
+ processors.append(
515
+ FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
516
+ )
517
+ if generation_config.suppress_tokens is not None:
518
+ processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
519
+ if generation_config.begin_suppress_tokens is not None:
520
+ begin_index = input_ids_seq_length
521
+ begin_index = (
522
+ begin_index
523
+ if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
524
+ else begin_index + 1
525
+ )
526
+ if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0:
527
+ # generation starts after the last token that is forced
528
+ begin_index += generation_config.forced_decoder_ids[-1][0]
529
+ processors.append(
530
+ FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
531
+ )
532
+ if generation_config.forced_decoder_ids is not None:
533
+ forced_decoder_ids = [
534
+ [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids
535
+ ]
536
+ processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids))
537
+ processors = self._merge_criteria_processor_list(processors, logits_processor)
538
+
539
+ return processors
540
+
541
+ def _merge_criteria_processor_list(
542
+ self,
543
+ default_list: FlaxLogitsProcessorList,
544
+ custom_list: FlaxLogitsProcessorList,
545
+ ) -> FlaxLogitsProcessorList:
546
+ if len(custom_list) == 0:
547
+ return default_list
548
+ for default in default_list:
549
+ for custom in custom_list:
550
+ if type(custom) is type(default):
551
+ object_type = "logits processor"
552
+ raise ValueError(
553
+ f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
554
+ f" `generate`, but it has already been created with the values {default}. {default} has been"
555
+ " created by passing the corresponding arguments to generate or by the model's config default"
556
+ f" values. If you just want to change the default values of {object_type} consider passing"
557
+ f" them as arguments to `generate` instead of using a custom {object_type}."
558
+ )
559
+ default_list.extend(custom_list)
560
+ return default_list
561
+
562
+ def _greedy_search(
563
+ self,
564
+ input_ids: jnp.ndarray,
565
+ max_length: Optional[int] = None,
566
+ pad_token_id: Optional[int] = None,
567
+ eos_token_id: Optional[int] = None,
568
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
569
+ trace: bool = True,
570
+ params: Optional[Dict[str, jnp.ndarray]] = None,
571
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
572
+ ):
573
+ # init values
574
+ max_length = max_length if max_length is not None else self.generation_config.max_length
575
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
576
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
577
+
578
+ batch_size, cur_len = input_ids.shape
579
+
580
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
581
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
582
+ cur_len = jnp.array(cur_len)
583
+
584
+ # per batch-item holding current token in loop.
585
+ sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
586
+ sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
587
+
588
+ # per batch-item state bit indicating if sentence has finished.
589
+ is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
590
+
591
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
592
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
593
+ model = self.decode if self.config.is_encoder_decoder else self
594
+ # initialize model specific kwargs
595
+ model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
596
+
597
+ # initialize state
598
+ state = GreedyState(
599
+ cur_len=cur_len,
600
+ sequences=sequences,
601
+ running_token=input_ids,
602
+ is_sent_finished=is_sent_finished,
603
+ model_kwargs=model_kwargs,
604
+ )
605
+
606
+ def greedy_search_cond_fn(state):
607
+ """state termination condition fn."""
608
+ has_reached_max_length = state.cur_len == max_length
609
+ all_sequence_finished = jnp.all(state.is_sent_finished)
610
+ finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
611
+ return ~finish_generation
612
+
613
+ def greedy_search_body_fn(state):
614
+ """state update fn."""
615
+ model_outputs = model(state.running_token, params=params, **state.model_kwargs)
616
+ logits = model_outputs.logits[:, -1]
617
+
618
+ # apply min_length, ...
619
+ logits = logits_processor(state.sequences, logits, state.cur_len)
620
+
621
+ next_token = jnp.argmax(logits, axis=-1)
622
+
623
+ next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
624
+ next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
625
+ next_token = next_token[:, None]
626
+
627
+ next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
628
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
629
+ return GreedyState(
630
+ cur_len=state.cur_len + 1,
631
+ sequences=next_sequences,
632
+ running_token=next_token,
633
+ is_sent_finished=next_is_sent_finished,
634
+ model_kwargs=next_model_kwargs,
635
+ )
636
+
637
+ # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
638
+ if input_ids.shape[1] > 1:
639
+ state = greedy_search_body_fn(state)
640
+
641
+ if not trace:
642
+ state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
643
+ else:
644
+ state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
645
+
646
+ return FlaxGreedySearchOutput(sequences=state.sequences)
647
+
648
+ def _sample(
649
+ self,
650
+ input_ids: jnp.ndarray,
651
+ max_length: Optional[int] = None,
652
+ pad_token_id: Optional[int] = None,
653
+ eos_token_id: Optional[int] = None,
654
+ prng_key: Optional[jnp.ndarray] = None,
655
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
656
+ logits_warper: Optional[FlaxLogitsProcessorList] = None,
657
+ trace: bool = True,
658
+ params: Optional[Dict[str, jnp.ndarray]] = None,
659
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
660
+ ):
661
+ # init values
662
+ max_length = max_length if max_length is not None else self.generation_config.max_length
663
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
664
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
665
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
666
+
667
+ batch_size, cur_len = input_ids.shape
668
+
669
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
670
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
671
+ cur_len = jnp.array(cur_len)
672
+
673
+ # per batch-item holding current token in loop.
674
+ sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
675
+ sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
676
+
677
+ # per batch-item state bit indicating if sentence has finished.
678
+ is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
679
+
680
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
681
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
682
+ model = self.decode if self.config.is_encoder_decoder else self
683
+
684
+ # initialize model specific kwargs
685
+ model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
686
+
687
+ # initialize state
688
+ state = SampleState(
689
+ cur_len=cur_len,
690
+ sequences=sequences,
691
+ running_token=input_ids,
692
+ is_sent_finished=is_sent_finished,
693
+ prng_key=prng_key,
694
+ model_kwargs=model_kwargs,
695
+ )
696
+
697
+ def sample_search_cond_fn(state):
698
+ """state termination condition fn."""
699
+ has_reached_max_length = state.cur_len == max_length
700
+ all_sequence_finished = jnp.all(state.is_sent_finished)
701
+ finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
702
+ return ~finish_generation
703
+
704
+ def sample_search_body_fn(state):
705
+ """state update fn."""
706
+ prng_key, prng_key_next = jax.random.split(state.prng_key)
707
+ model_outputs = model(state.running_token, params=params, **state.model_kwargs)
708
+
709
+ logits = model_outputs.logits[:, -1]
710
+
711
+ # apply min_length, ...
712
+ logits = logits_processor(state.sequences, logits, state.cur_len)
713
+ # apply top_p, top_k, temperature
714
+ logits = logits_warper(logits, logits, state.cur_len)
715
+
716
+ next_token = jax.random.categorical(prng_key, logits, axis=-1)
717
+
718
+ next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
719
+ next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
720
+ next_token = next_token[:, None]
721
+
722
+ next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
723
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
724
+
725
+ return SampleState(
726
+ cur_len=state.cur_len + 1,
727
+ sequences=next_sequences,
728
+ running_token=next_token,
729
+ is_sent_finished=next_is_sent_finished,
730
+ model_kwargs=next_model_kwargs,
731
+ prng_key=prng_key_next,
732
+ )
733
+
734
+ # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
735
+ if input_ids.shape[1] > 1:
736
+ state = sample_search_body_fn(state)
737
+
738
+ if not trace:
739
+ state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
740
+ else:
741
+ state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
742
+
743
+ return FlaxSampleOutput(sequences=state.sequences)
744
+
745
+ def _beam_search(
746
+ self,
747
+ input_ids: jnp.ndarray,
748
+ max_length: Optional[int] = None,
749
+ pad_token_id: Optional[int] = None,
750
+ eos_token_id: Optional[int] = None,
751
+ length_penalty: Optional[float] = None,
752
+ early_stopping: Optional[Union[bool, str]] = None,
753
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
754
+ trace: bool = True,
755
+ params: Optional[Dict[str, jnp.ndarray]] = None,
756
+ num_return_sequences: Optional[int] = None,
757
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
758
+ ):
759
+ """
760
+ This beam search function is heavily inspired by Flax's official example:
761
+ https://github.com/google/flax/blob/main/examples/wmt/decode.py
762
+ """
763
+
764
+ def flatten_beam_dim(tensor):
765
+ """Flattens the first two dimensions of a non-scalar array."""
766
+ # ignore scalars (e.g. cache index)
767
+ if tensor.ndim == 0:
768
+ return tensor
769
+ return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
770
+
771
+ def unflatten_beam_dim(tensor, batch_size, num_beams):
772
+ """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
773
+ # ignore scalars (e.g. cache index)
774
+ if tensor.ndim == 0:
775
+ return tensor
776
+ return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
777
+
778
+ def gather_beams(nested, beam_indices, batch_size, new_num_beams):
779
+ """
780
+ Gathers the beam slices indexed by beam_indices into new beam array.
781
+ """
782
+ batch_indices = jnp.reshape(
783
+ jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
784
+ )
785
+
786
+ def gather_fn(tensor):
787
+ # ignore scalars (e.g. cache index)
788
+ if tensor.ndim == 0:
789
+ return tensor
790
+ else:
791
+ return tensor[batch_indices, beam_indices]
792
+
793
+ return jax.tree_util.tree_map(gather_fn, nested)
794
+
795
+ # init values
796
+ max_length = max_length if max_length is not None else self.generation_config.max_length
797
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
798
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
799
+ length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
800
+ early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
801
+ num_return_sequences = (
802
+ num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
803
+ )
804
+
805
+ batch_size, num_beams, cur_len = input_ids.shape
806
+
807
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
808
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
809
+ cur_len = jnp.array(cur_len)
810
+
811
+ # record the prompt length of decoder
812
+ decoder_prompt_len = input_ids.shape[-1]
813
+
814
+ # per batch,beam-item holding current token in loop.
815
+ sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
816
+ running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
817
+ running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
818
+
819
+ # per batch,beam-item state bit indicating if sentence has finished.
820
+ is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
821
+
822
+ # per batch,beam-item score, logprobs
823
+ running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
824
+ scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
825
+
826
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
827
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
828
+ model = self.decode if self.config.is_encoder_decoder else self
829
+
830
+ # flatten beam dim
831
+ if "encoder_outputs" in model_kwargs:
832
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
833
+ model_kwargs["encoder_outputs"]["last_hidden_state"]
834
+ )
835
+ for kwarg in ["attention_mask", "decoder_attention_mask"]:
836
+ if kwarg in model_kwargs:
837
+ model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg])
838
+
839
+ # initialize model specific kwargs
840
+ model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
841
+
842
+ # initialize state
843
+ state = BeamSearchState(
844
+ cur_len=cur_len,
845
+ running_sequences=running_sequences,
846
+ running_scores=running_scores,
847
+ sequences=sequences,
848
+ scores=scores,
849
+ is_sent_finished=is_sent_finished,
850
+ model_kwargs=model_kwargs,
851
+ )
852
+
853
+ def beam_search_cond_fn(state):
854
+ """beam search state termination condition fn."""
855
+
856
+ # 1. is less than max length?
857
+ not_max_length_yet = state.cur_len < max_length
858
+
859
+ # 2. can the new beams still improve?
860
+ # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion
861
+ # below for more details.
862
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
863
+ # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
864
+ # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
865
+ if early_stopping == "never" and length_penalty > 0.0:
866
+ best_running_score = state.running_scores[:, :1] / (
867
+ (max_length - decoder_prompt_len) ** length_penalty
868
+ )
869
+ else:
870
+ best_running_score = state.running_scores[:, :1] / (
871
+ (state.cur_len - decoder_prompt_len) ** length_penalty
872
+ )
873
+ worst_finished_score = jnp.where(
874
+ state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
875
+ )
876
+ improvement_still_possible = jnp.any(best_running_score > worst_finished_score)
877
+
878
+ # 3. is there still a beam that has not finished?
879
+ still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True))
880
+
881
+ return not_max_length_yet & still_open_beam & improvement_still_possible
882
+
883
+ def beam_search_body_fn(state, input_ids_length=1):
884
+ """beam search state update fn."""
885
+ # 1. Forward current tokens
886
+ # Collect the current position slice along length to feed the fast
887
+ # autoregressive decoder model. Flatten the beam dimension into batch
888
+ # dimension for feeding into the model.
889
+ # unflatten beam dimension
890
+ # Unflatten beam dimension in attention cache arrays
891
+ input_token = flatten_beam_dim(
892
+ lax.dynamic_slice(
893
+ state.running_sequences,
894
+ (0, 0, state.cur_len - input_ids_length),
895
+ (batch_size, num_beams, input_ids_length),
896
+ )
897
+ )
898
+ model_outputs = model(input_token, params=params, **state.model_kwargs)
899
+
900
+ logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
901
+ cache = jax.tree_util.tree_map(
902
+ lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
903
+ )
904
+
905
+ # adapt logits for FlaxMarianMTModel
906
+ logits = self._adapt_logits_for_beam_search(logits)
907
+
908
+ # 2. Compute log probs
909
+ # get log probabilities from logits,
910
+ # process logits with processors (*e.g.* min_length, ...), and
911
+ # add new logprobs to existing running logprobs scores.
912
+ log_probs = jax.nn.log_softmax(logits)
913
+ log_probs = logits_processor(
914
+ flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), state.cur_len
915
+ )
916
+ log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
917
+ log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
918
+ vocab_size = log_probs.shape[2]
919
+ log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
920
+
921
+ # 3. Retrieve top-K
922
+ # Each item in batch has num_beams * vocab_size candidate sequences.
923
+ # For each item, get the top 2*k candidates with the highest log-
924
+ # probabilities. We gather the top 2*K beams here so that even if the best
925
+ # K sequences reach EOS simultaneously, we have another K sequences
926
+ # remaining to continue the live beam search.
927
+ # Gather the top 2*K scores from _all_ beams.
928
+ # Gather 2*k top beams.
929
+ # Recover the beam index by floor division.
930
+ # Recover token id by modulo division and expand Id array for broadcasting.
931
+ # Update sequences for the 2*K top-k new sequences.
932
+ beams_to_keep = 2 * num_beams
933
+ topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
934
+ topk_beam_indices = topk_indices // vocab_size
935
+ topk_running_sequences = gather_beams(
936
+ state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
937
+ )
938
+ topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
939
+ topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
940
+
941
+ # 4. Check which sequences have ended
942
+ # Update current sequences:
943
+ # Did any of these sequences reach an end marker?
944
+ # To prevent these just-finished sequences from being added to the set of
945
+ # active beam search sequences, set their log probs to a very large
946
+ # negative value.
947
+ did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
948
+ running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
949
+ # 5. Get running sequences scores for next
950
+ # Determine the top k beam indices (from top 2*k beams) from log probs
951
+ # and gather top k beams (from top 2*k beams).
952
+ next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1]
953
+ next_running_sequences, next_running_scores = gather_beams(
954
+ [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
955
+ )
956
+
957
+ # 6. Process topk logits
958
+ # Further process log probs:
959
+ # - add length penalty
960
+ # - make sure no scores can be added anymore if beam is full
961
+ # - make sure still running sequences cannot be chosen as finalized beam
962
+ topk_log_probs = topk_log_probs / ((state.cur_len + 1 - decoder_prompt_len) ** length_penalty)
963
+ beams_in_batch_are_full = jnp.broadcast_to(
964
+ state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape
965
+ ) & (early_stopping is True)
966
+ add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
967
+ topk_log_probs += add_penalty * np.array(-1.0e7)
968
+
969
+ # 7. Get scores, sequences, is sentence finished for next.
970
+ # Combine sequences, scores, and flags along the beam dimension and compare
971
+ # new finished sequence scores to existing finished scores and select the
972
+ # best from the new set of beams
973
+ merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
974
+ merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
975
+ merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
976
+ topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1]
977
+ next_sequences, next_scores, next_is_sent_finished = gather_beams(
978
+ [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
979
+ )
980
+
981
+ # 8. Update model kwargs.
982
+ # Determine the top k beam indices from the original set of all beams.
983
+ # With these, gather the top k beam-associated caches.
984
+ next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
985
+ next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
986
+ model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache)
987
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
988
+
989
+ return BeamSearchState(
990
+ cur_len=state.cur_len + 1,
991
+ running_scores=next_running_scores,
992
+ running_sequences=next_running_sequences,
993
+ scores=next_scores,
994
+ sequences=next_sequences,
995
+ is_sent_finished=next_is_sent_finished,
996
+ model_kwargs=next_model_kwargs,
997
+ )
998
+
999
+ # Always run first iteration outside of `lax.while_loop` to avoid calling `beam_search_cond_fn`
1000
+ # when `state.cur_len` equals `decoder_prompt_len`. This also helps to comply with TPU when
1001
+ # the very first prompt has sequence length > 1.
1002
+ state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
1003
+
1004
+ if not trace:
1005
+ state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
1006
+ else:
1007
+ state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
1008
+
1009
+ # Account for the edge-case where there are no finished sequences for a
1010
+ # particular batch item. If so, return running sequences for that batch item.
1011
+ none_finished = jnp.any(state.is_sent_finished, axis=1)
1012
+ sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
1013
+ scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
1014
+
1015
+ # Take best beams for each batch (the score is sorted in descending order)
1016
+ sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :])
1017
+ scores = flatten_beam_dim(scores[:, :num_return_sequences])
1018
+
1019
+ return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
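As a reading aid for the candidate-selection step above (steps 3–5 of `beam_search_body_fn`), here is a self-contained toy sketch, not part of the file, that reproduces the "keep `2 * num_beams` candidates, then mask out the ones that just emitted EOS from the running set" logic with plain `jax.numpy`. All values below are made up.

```python
import jax.numpy as jnp
from jax import lax

batch_size, num_beams, vocab_size = 1, 2, 5
eos_token_id = 4

# Toy accumulated log-probs for every (beam, token) continuation: beam 0 first, then beam 1.
log_probs = jnp.array(
    [-0.1, -1.0, -2.0, -3.0, -0.2,   # beam 0
     -0.5, -0.6, -4.0, -5.0, -0.3]   # beam 1
).reshape((batch_size, num_beams * vocab_size))

beams_to_keep = 2 * num_beams
topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
topk_beam_indices = topk_indices // vocab_size   # which beam each candidate extends
topk_token_ids = topk_indices % vocab_size       # which token each candidate appends

# Candidates that just emitted EOS are pushed out of the *running* set by a large
# negative mask, analogous to `running_topk_log_probs` in the body function above.
did_just_finish = topk_token_ids == eos_token_id
running_log_probs = topk_log_probs + did_just_finish * -1.0e7

print(topk_token_ids)     # [[0 4 4 0]] -> two of the four candidates ended with EOS
print(running_log_probs)  # EOS candidates now score ~-1e7 and will not stay "running"
```

Keeping `2 * num_beams` candidates is what guarantees that even if the best `num_beams` continuations all hit EOS at the same step, there are still enough non-finished candidates left to keep the live beams going.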
env-llmeval/lib/python3.10/site-packages/transformers/generation/logits_process.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py ADDED
@@ -0,0 +1,157 @@
1
+ import time
2
+ import warnings
3
+ from abc import ABC
4
+ from copy import deepcopy
5
+ from typing import Optional
6
+
7
+ import torch
8
+
9
+ from ..utils import add_start_docstrings, logging
10
+
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+
15
+ STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
16
+ Args:
17
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
18
+ Indices of input sequence tokens in the vocabulary.
19
+
20
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
21
+ [`PreTrainedTokenizer.__call__`] for details.
22
+
23
+ [What are input IDs?](../glossary#input-ids)
24
+ scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
25
+ Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
26
+ or scores for each vocabulary token after SoftMax. If this stopping criteria depends on the `scores` input,
27
+ make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`.
28
+ kwargs (`Dict[str, Any]`, *optional*):
29
+ Additional stopping criteria specific kwargs.
30
+
31
+ Return:
32
+ `torch.BoolTensor` of shape `(batch_size, 1)`, where `True` indicates we stop generation
33
+ for a particular row and `False` indicates we should continue.
34
+
35
+ """
36
+
37
+
38
+ class StoppingCriteria(ABC):
39
+ """Abstract base class for all stopping criteria that can be applied during generation.
40
+
41
+ If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True,
42
+ output_scores=True` to `generate`.
43
+ """
44
+
45
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
46
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
47
+ raise NotImplementedError("StoppingCriteria needs to be subclassed")
48
+
49
+
50
+ class MaxLengthCriteria(StoppingCriteria):
51
+ """
52
+ This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
53
+ in mind for decoder-only type of transformers, this will include the initial prompted tokens.
54
+
55
+ Args:
56
+ max_length (`int`):
57
+ The maximum length that the output sequence can have in number of tokens.
58
+ max_position_embeddings (`int`, *optional*):
59
+ The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
60
+ """
61
+
62
+ def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
63
+ self.max_length = max_length
64
+ self.max_position_embeddings = max_position_embeddings
65
+
66
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
67
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
68
+ cur_len = input_ids.shape[-1]
69
+ is_done = cur_len >= self.max_length
70
+ if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
71
+ logger.warning_once(
72
+ "This is a friendly reminder - the current text generation call will exceed the model's predefined "
73
+ f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
74
+ "exceptions, performance degradation, or nothing at all."
75
+ )
76
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
77
+
78
+
79
+ class MaxNewTokensCriteria(StoppingCriteria):
80
+ """
81
+ This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in
82
+ mind for decoder-only type of transformers, this will **not** include the initial prompted tokens. This is very
83
+ close to `MaxLengthCriteria` but ignores the number of initial tokens.
84
+
85
+ Args:
86
+ start_length (`int`):
87
+ The number of initial tokens.
88
+ max_new_tokens (`int`):
89
+ The maximum number of tokens to generate.
90
+ """
91
+
92
+ def __init__(self, start_length: int, max_new_tokens: int):
93
+ warnings.warn(
94
+ "The class `MaxNewTokensCriteria` is deprecated. "
95
+ f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
96
+ "with `max_length = start_length + max_new_tokens` instead.",
97
+ FutureWarning,
98
+ )
99
+ self.start_length = start_length
100
+ self.max_new_tokens = max_new_tokens
101
+ self.max_length = start_length + max_new_tokens
102
+
103
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
104
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
105
+ is_done = input_ids.shape[-1] >= self.max_length
106
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
107
+
108
+
109
+ class MaxTimeCriteria(StoppingCriteria):
110
+ """
111
+ This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
112
+ time will start being counted when you initialize this object. You can override this by passing an
113
+ `initial_timestamp`.
114
+
115
+ Args:
116
+ max_time (`float`):
117
+ The maximum allowed time in seconds for the generation.
118
+ initial_timestamp (`float`, *optional*, defaults to `time.time()`):
119
+ The timestamp from which the allowed generation time is counted.
120
+ """
121
+
122
+ def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
123
+ self.max_time = max_time
124
+ self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
125
+
126
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
127
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
128
+ is_done = time.time() - self.initial_timestamp > self.max_time
129
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
130
+
131
+
132
+ class StoppingCriteriaList(list):
133
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
134
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
135
+ is_done = torch.full((input_ids.shape[0],), False, device=input_ids.device)
136
+ for criteria in self:
137
+ is_done = is_done | criteria(input_ids, scores, **kwargs)
138
+ return is_done
139
+
140
+ @property
141
+ def max_length(self) -> Optional[int]:
142
+ for stopping_criterium in self:
143
+ if isinstance(stopping_criterium, MaxLengthCriteria):
144
+ return stopping_criterium.max_length
145
+ elif isinstance(stopping_criterium, MaxNewTokensCriteria):
146
+ return stopping_criterium.max_length
147
+ return None
148
+
149
+
150
+ def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
151
+ stopping_max_length = stopping_criteria.max_length
152
+ new_stopping_criteria = deepcopy(stopping_criteria)
153
+ if stopping_max_length is not None and stopping_max_length != max_length:
154
+ warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
155
+ elif stopping_max_length is None:
156
+ new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
157
+ return new_stopping_criteria
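For orientation, a hedged usage sketch (not in the diff) showing how the criteria defined in this file compose through `StoppingCriteriaList`; the tensors and numbers below are made up.

```python
import torch

from transformers.generation.stopping_criteria import (
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

criteria = StoppingCriteriaList([
    MaxLengthCriteria(max_length=20),
    MaxTimeCriteria(max_time=5.0),  # seconds
])

input_ids = torch.ones((2, 20), dtype=torch.long)  # batch of 2, already 20 tokens long
scores = torch.zeros((2, 50257))                   # dummy vocabulary scores (unused here)

is_done = criteria(input_ids, scores)
print(is_done)  # tensor([True, True]) -> max_length already reached for both rows
```

`StoppingCriteriaList.__call__` simply ORs the per-row results of each criterion, as defined above, so any single satisfied criterion stops generation for that row.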
env-llmeval/lib/python3.10/site-packages/transformers/generation/streamers.py ADDED
@@ -0,0 +1,227 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from queue import Queue
17
+ from typing import TYPE_CHECKING, Optional
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from ..models.auto import AutoTokenizer
22
+
23
+
24
+ class BaseStreamer:
25
+ """
26
+ Base class from which `.generate()` streamers should inherit.
27
+ """
28
+
29
+ def put(self, value):
30
+ """Function that is called by `.generate()` to push new tokens"""
31
+ raise NotImplementedError()
32
+
33
+ def end(self):
34
+ """Function that is called by `.generate()` to signal the end of generation"""
35
+ raise NotImplementedError()
36
+
37
+
38
+ class TextStreamer(BaseStreamer):
39
+ """
40
+ Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
41
+
42
+ <Tip warning={true}>
43
+
44
+ The API for the streamer classes is still under development and may change in the future.
45
+
46
+ </Tip>
47
+
48
+ Parameters:
49
+ tokenizer (`AutoTokenizer`):
50
+ The tokenizer used to decode the tokens.
51
+ skip_prompt (`bool`, *optional*, defaults to `False`):
52
+ Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
53
+ decode_kwargs (`dict`, *optional*):
54
+ Additional keyword arguments to pass to the tokenizer's `decode` method.
55
+
56
+ Examples:
57
+
58
+ ```python
59
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
60
+
61
+ >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
62
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
63
+ >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
64
+ >>> streamer = TextStreamer(tok)
65
+
66
+ >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
67
+ >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
68
+ An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
69
+ ```
70
+ """
71
+
72
+ def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
73
+ self.tokenizer = tokenizer
74
+ self.skip_prompt = skip_prompt
75
+ self.decode_kwargs = decode_kwargs
76
+
77
+ # variables used in the streaming process
78
+ self.token_cache = []
79
+ self.print_len = 0
80
+ self.next_tokens_are_prompt = True
81
+
82
+ def put(self, value):
83
+ """
84
+ Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
85
+ """
86
+ if len(value.shape) > 1 and value.shape[0] > 1:
87
+ raise ValueError("TextStreamer only supports batch size 1")
88
+ elif len(value.shape) > 1:
89
+ value = value[0]
90
+
91
+ if self.skip_prompt and self.next_tokens_are_prompt:
92
+ self.next_tokens_are_prompt = False
93
+ return
94
+
95
+ # Adds the new token to the cache and decodes the entire thing.
96
+ self.token_cache.extend(value.tolist())
97
+ text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
98
+
99
+ # After the symbol for a new line, we flush the cache.
100
+ if text.endswith("\n"):
101
+ printable_text = text[self.print_len :]
102
+ self.token_cache = []
103
+ self.print_len = 0
104
+ # If the last token is a CJK character, we print the characters.
105
+ elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
106
+ printable_text = text[self.print_len :]
107
+ self.print_len += len(printable_text)
108
+ # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
109
+ # which may change with the subsequent token -- there are probably smarter ways to do this!)
110
+ else:
111
+ printable_text = text[self.print_len : text.rfind(" ") + 1]
112
+ self.print_len += len(printable_text)
113
+
114
+ self.on_finalized_text(printable_text)
115
+
116
+ def end(self):
117
+ """Flushes any remaining cache and prints a newline to stdout."""
118
+ # Flush the cache, if it exists
119
+ if len(self.token_cache) > 0:
120
+ text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
121
+ printable_text = text[self.print_len :]
122
+ self.token_cache = []
123
+ self.print_len = 0
124
+ else:
125
+ printable_text = ""
126
+
127
+ self.next_tokens_are_prompt = True
128
+ self.on_finalized_text(printable_text, stream_end=True)
129
+
130
+ def on_finalized_text(self, text: str, stream_end: bool = False):
131
+ """Prints the new text to stdout. If the stream is ending, also prints a newline."""
132
+ print(text, flush=True, end="" if not stream_end else None)
133
+
134
+ def _is_chinese_char(self, cp):
135
+ """Checks whether CP is the codepoint of a CJK character."""
136
+ # This defines a "chinese character" as anything in the CJK Unicode block:
137
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
138
+ #
139
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
140
+ # despite its name. The modern Korean Hangul alphabet is a different block,
141
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
142
+ # space-separated words, so they are not treated specially and handled
143
+ # like all of the other languages.
144
+ if (
145
+ (cp >= 0x4E00 and cp <= 0x9FFF)
146
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
147
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
148
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
149
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
150
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
151
+ or (cp >= 0xF900 and cp <= 0xFAFF)
152
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
153
+ ): #
154
+ return True
155
+
156
+ return False
157
+
158
+
159
+ class TextIteratorStreamer(TextStreamer):
160
+ """
161
+ Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
162
+ useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an interactive
163
+ Gradio demo).
164
+
165
+ <Tip warning={true}>
166
+
167
+ The API for the streamer classes is still under development and may change in the future.
168
+
169
+ </Tip>
170
+
171
+ Parameters:
172
+ tokenizer (`AutoTokenizer`):
173
+ The tokenizer used to decode the tokens.
174
+ skip_prompt (`bool`, *optional*, defaults to `False`):
175
+ Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
176
+ timeout (`float`, *optional*):
177
+ The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
178
+ in `.generate()`, when it is called in a separate thread.
179
+ decode_kwargs (`dict`, *optional*):
180
+ Additional keyword arguments to pass to the tokenizer's `decode` method.
181
+
182
+ Examples:
183
+
184
+ ```python
185
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
186
+ >>> from threading import Thread
187
+
188
+ >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
189
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
190
+ >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
191
+ >>> streamer = TextIteratorStreamer(tok)
192
+
193
+ >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
194
+ >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
195
+ >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
196
+ >>> thread.start()
197
+ >>> generated_text = ""
198
+ >>> for new_text in streamer:
199
+ ... generated_text += new_text
200
+ >>> generated_text
201
+ 'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
202
+ ```
203
+ """
204
+
205
+ def __init__(
206
+ self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
207
+ ):
208
+ super().__init__(tokenizer, skip_prompt, **decode_kwargs)
209
+ self.text_queue = Queue()
210
+ self.stop_signal = None
211
+ self.timeout = timeout
212
+
213
+ def on_finalized_text(self, text: str, stream_end: bool = False):
214
+ """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
215
+ self.text_queue.put(text, timeout=self.timeout)
216
+ if stream_end:
217
+ self.text_queue.put(self.stop_signal, timeout=self.timeout)
218
+
219
+ def __iter__(self):
220
+ return self
221
+
222
+ def __next__(self):
223
+ value = self.text_queue.get(timeout=self.timeout)
224
+ if value == self.stop_signal:
225
+ raise StopIteration()
226
+ else:
227
+ return value
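`BaseStreamer` only requires `put` and `end`, so custom streamers are small. As an illustration (not part of the file), a minimal hypothetical subclass that just counts the token ids it receives, assuming a PyTorch `generate` call that pushes `torch.Tensor`s of token ids:

```python
from transformers.generation.streamers import BaseStreamer


class TokenCounterStreamer(BaseStreamer):
    """Hypothetical streamer: counts how many token ids `.generate()` pushes."""

    def __init__(self):
        self.n_tokens = 0

    def put(self, value):
        # `value` is a tensor of token ids (the prompt first, then one step at a time)
        self.n_tokens += value.numel()

    def end(self):
        print(f"generation finished after receiving {self.n_tokens} token ids")
```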
env-llmeval/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py ADDED
@@ -0,0 +1,591 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+ from typing import List, Tuple
18
+
19
+ import numpy as np
20
+ import tensorflow as tf
21
+
22
+ from ..tf_utils import stable_softmax
23
+ from ..utils import add_start_docstrings
24
+ from ..utils.logging import get_logger
25
+
26
+
27
+ logger = get_logger(__name__)
28
+
29
+
30
+ TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
31
+ Args:
32
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
33
+ Indices of input sequence tokens in the vocabulary.
34
+
35
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
36
+ [`PreTrainedTokenizer.__call__`] for details.
37
+
38
+ [What are input IDs?](../glossary#input-ids)
39
+ scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`):
40
+ Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
41
+ search or log softmax for each vocabulary token when using beam search.
42
+ cur_len (`int`):
43
+ The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length
44
+ is the maximum length generate can produce, and we need to know which of its tokens are valid.
45
+ kwargs (`Dict[str, Any]`, *optional*):
46
+ Additional logits processor specific kwargs.
47
+
48
+ Return:
49
+ `tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
50
+ """
51
+
52
+
53
+ class TFLogitsProcessor:
54
+ """Abstract base class for all logit processors that can be applied during generation."""
55
+
56
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
57
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
58
+ """TF method for processing logits."""
59
+ raise NotImplementedError(
60
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
61
+ )
62
+
63
+
64
+ class TFLogitsWarper:
65
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
66
+
67
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
68
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
69
+ """TF method for warping logits."""
70
+ raise NotImplementedError(
71
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
72
+ )
73
+
74
+
75
+ class TFLogitsProcessorList(list):
76
+ """
77
+ This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor.
78
+ This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the
79
+ inputs.
80
+ """
81
+
82
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
83
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor:
84
+ for processor in self:
85
+ function_args = inspect.signature(processor.__call__).parameters
86
+ if len(function_args) > 3:
87
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
88
+ raise ValueError(
89
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
90
+ f"{processor.__class__} are passed to the logits processor."
91
+ )
92
+ scores = processor(input_ids, scores, cur_len, **kwargs)
93
+ else:
94
+ scores = processor(input_ids, scores, cur_len)
95
+ return scores
96
+
97
+
98
+ class TFTemperatureLogitsWarper(TFLogitsWarper):
99
+ r"""
100
+ [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution).
101
+
102
+ Args:
103
+ temperature (`float`):
104
+ The value used to module the logits distribution.
105
+ """
106
+
107
+ def __init__(self, temperature: float):
108
+ if not isinstance(temperature, float) or not (temperature > 0):
109
+ raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
110
+
111
+ self.temperature = temperature
112
+
113
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
114
+ scores = scores / self.temperature
115
+ return scores
116
+
117
+
118
+ class TFTopKLogitsWarper(TFLogitsWarper):
119
+ r"""
120
+ [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
121
+
122
+ Args:
123
+ top_k (`int`):
124
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
125
+ filter_value (`float`, *optional*, defaults to -inf):
126
+ All filtered values will be set to this float value.
127
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
128
+ Minimum number of tokens that cannot be filtered.
129
+ """
130
+
131
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
132
+ if not isinstance(top_k, int) or top_k <= 0:
133
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
134
+
135
+ self.top_k = max(top_k, min_tokens_to_keep)
136
+ self.filter_value = filter_value
137
+
138
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
139
+ top_k = min(self.top_k, scores.shape[-1]) # Safety check
140
+ # Boolean mask containing all tokens with a probability less than the last token of the top-k
141
+ indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]
142
+ next_scores = tf.where(indices_to_remove, self.filter_value, scores)
143
+ return next_scores
144
+
145
+
146
+ class TFTopPLogitsWarper(TFLogitsWarper):
147
+ """
148
+ [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off.
149
+
150
+ Args:
151
+ top_p (`float`):
152
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
153
+ higher are kept for generation.
154
+ filter_value (`float`, *optional*, defaults to -inf):
155
+ All filtered values will be set to this float value.
156
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
157
+ Minimum number of tokens that cannot be filtered.
158
+ """
159
+
160
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
161
+ if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
162
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
163
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
164
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
165
+
166
+ self.top_p = top_p
167
+ self.filter_value = filter_value
168
+ self.min_tokens_to_keep = min_tokens_to_keep
169
+
170
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
171
+ topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1])
172
+
173
+ mask_scores = tf.fill(scores.shape, self.filter_value)
174
+ cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1)
175
+ score_mask = cumulative_probs < self.top_p
176
+
177
+ # Also include the token that is higher than top_p (the first false = shift and insert a True on the left)
178
+ score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1)
179
+
180
+ # Ensure min tokens to keep
181
+ score_mask = tf.concat(
182
+ (
183
+ tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool),
184
+ score_mask[:, self.min_tokens_to_keep :],
185
+ ),
186
+ axis=-1,
187
+ )
188
+
189
+ # Mask the values that do not fit the criteria
190
+ topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)
191
+
192
+ # Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size)
193
+ # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we
194
+ # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`)
195
+ scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]])
196
+ scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1)
197
+ next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape)
198
+
199
+ return next_scores
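The mask-shifting trick in `TFTopPLogitsWarper.__call__` (mark everything strictly below `top_p`, then shift the mask right so the token that crosses the threshold is also kept) can be seen on a toy row. This sketch is an illustration only, not part of the file:

```python
import numpy as np

probs = np.array([0.5, 0.3, 0.1, 0.06, 0.04])  # already sorted, descending
top_p = 0.7

cumulative = np.cumsum(probs)                  # [0.5, 0.8, 0.9, 0.96, 1.0]
mask = cumulative < top_p                      # [True, False, False, False, False]

# Shift right and insert True on the left, so the token that crosses top_p is kept too.
mask = np.concatenate(([True], mask[:-1]))     # [True, True, False, False, False]
print(mask)  # tokens 0 and 1 are kept; together they cover >= 0.7 of the probability
```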
200
+
201
+
202
+ class TFMinLengthLogitsProcessor(TFLogitsProcessor):
203
+ r"""
204
+ [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
205
+
206
+ Args:
207
+ min_length (`int`):
208
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
209
+ eos_token_id (`int`):
210
+ The id of the *end-of-sequence* token.
211
+ """
212
+
213
+ def __init__(self, min_length: int, eos_token_id: int):
214
+ if not isinstance(min_length, int) or min_length < 0:
215
+ raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
216
+
217
+ if not isinstance(eos_token_id, int) or eos_token_id < 0:
218
+ raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
219
+
220
+ self.min_length = min_length
221
+ self.eos_token_id = eos_token_id
222
+
223
+ def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:
224
+ eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id
225
+ scores = tf.where(eos_token_id_mask, float("-inf"), scores)
226
+ return scores
227
+
228
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
229
+ # applies eos token masking if the first argument is true
230
+ scores = tf.cond(
231
+ tf.less(cur_len, self.min_length),
232
+ lambda: self._apply_eos_token_mask(scores),
233
+ lambda: tf.identity(scores),
234
+ )
235
+ return scores
236
+
237
+
238
+ class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor):
239
+ r"""
240
+ [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences.
241
+
242
+ Args:
243
+ repetition_penalty (`float`):
244
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
245
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
246
+ """
247
+
248
+ def __init__(self, penalty: float):
249
+ if not isinstance(penalty, float) or not (penalty > 0):
250
+ raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
251
+
252
+ self.penalty = penalty
253
+
254
+ def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
255
+ # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown
256
+ # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has
257
+ # the same token multiple times.
258
+
259
+ # Gathers the penalties to apply
260
+ logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1)
261
+ logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties)
262
+ logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties)
263
+
264
+ # Scatters the penalties
265
+ token_penalties = tf.ones(logits.shape)
266
+ batch_size = input_ids.shape[0]
267
+ seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape
268
+ indexable_prev_input_ids = tf.concat(
269
+ (
270
+ tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1),
271
+ tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1),
272
+ ),
273
+ axis=1,
274
+ )
275
+ token_penalties = tf.tensor_scatter_nd_update(
276
+ token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1])
277
+ )
278
+ return token_penalties
279
+
280
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
281
+ score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores)
282
+
283
+ scores = tf.math.multiply(scores, score_penalties)
284
+
285
+ return scores
286
+
287
+
288
+ class TFNoBadWordsLogitsProcessor(TFLogitsProcessor):
289
+ """
290
+ [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled.
291
+
292
+ Args:
293
+ bad_words_ids (`List[List[int]]`):
294
+ List of list of token ids that are not allowed to be generated. In order to get the tokens of the words
295
+ that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing
296
+ the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space`
297
+ argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from
298
+ `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).
299
+ eos_token_id (`int`):
300
+ The id of the *end-of-sequence* token.
301
+ """
302
+
303
+ def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int):
304
+ if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
305
+ raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
306
+ if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
307
+ raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
308
+ if any(
309
+ any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
310
+ for bad_word_ids in bad_words_ids
311
+ ):
312
+ raise ValueError(
313
+ f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
314
+ )
315
+
316
+ # stores the information about bad words in three tensors:
317
+ # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons
318
+ self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1)
319
+ # 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons
320
+ bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids]
321
+ if any(word_len == 0 for word_len in bad_word_seqs_len):
322
+ raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
323
+ self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32)
324
+ # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned
325
+ self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids])
326
+
327
+ def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor:
328
+ def _tokens_match(bad_word_seq_number):
329
+ def _len_one():
330
+ # If the bad sequence only has one token, always mask it
331
+ return tf.cond(
332
+ tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1),
333
+ lambda: tf.ones((), dtype=tf.bool),
334
+ _len_greater_than_cur_len,
335
+ )
336
+
337
+ def _len_greater_than_cur_len():
338
+ # Otherwise, if the bad sequence is longer than the current length they can't ever match
339
+ return tf.cond(
340
+ tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]),
341
+ lambda: tf.zeros((), dtype=tf.bool),
342
+ _match_found,
343
+ )
344
+
345
+ def _match_found():
346
+ # Finally, runs the actual comparison. Can only be called if the previous comparisons do not yield
347
+ # an answer (otherwise we get indexing exceptions)
348
+ compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1
349
+ return tf.cond(
350
+ tf.math.reduce_all(
351
+ tf.math.equal(
352
+ row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len]
353
+ )
354
+ ),
355
+ lambda: tf.ones((), dtype=tf.bool),
356
+ lambda: tf.zeros((), dtype=tf.bool),
357
+ )
358
+
359
+ match = _len_one()
360
+ return match
361
+
362
+ # Compares the current row against all bad word sequences, obtaining a mask with the matches.
363
+ match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool)
364
+ row_banned_tokens = self.seq_forbidden_tokens[match_mask]
365
+ return row_banned_tokens
366
+
367
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
368
+ # We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous
369
+ # `input_ids`, they may have a different length for each row, and they may even be empty for some rows.
370
+ # To remain simple and XLA-compatible, we work in a per-row fashion.
371
+ # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes
372
+ # a frequent choke point. (make `cur_len` a tensor?)
373
+ def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor:
374
+ row_input_ids, row_score = row_inputs
375
+ banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len])
376
+ banned_tokens_mask = tf.scatter_nd(
377
+ indices=tf.expand_dims(banned_tokens, axis=-1),
378
+ updates=tf.ones_like(banned_tokens, dtype=tf.bool),
379
+ shape=row_score.shape,
380
+ )
381
+ row_score = tf.where(banned_tokens_mask, -float("inf"), row_score)
382
+ return row_score
383
+
384
+ scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32)
385
+ return scores
386
+
387
+
388
+ class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor):
389
+ r"""
390
+ [`TFLogitsProcessor`] that enforces no repetition of n-grams. See
391
+ [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
392
+
393
+ Args:
394
+ ngram_size (`int`):
395
+ All ngrams of size `ngram_size` can only occur once.
396
+ """
397
+
398
+ def __init__(self, ngram_size: int):
399
+ if not isinstance(ngram_size, int) or ngram_size <= 0:
400
+ raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
401
+ self.ngram_size = ngram_size
402
+
403
+ def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len):
404
+ # Copied from fairseq for no_repeat_ngram in beam_search
405
+ if cur_len + 1 < self.ngram_size:
406
+ # return no banned tokens if we haven't generated ngram_size tokens yet
407
+ return [[] for _ in range(num_hypos)]
408
+ generated_ngrams = [{} for _ in range(num_hypos)]
409
+ prev_input_ids = input_ids[:, :cur_len]
410
+ for idx in range(num_hypos):
411
+ gen_tokens = prev_input_ids[idx].numpy().tolist()
412
+ generated_ngram = generated_ngrams[idx]
413
+ for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]):
414
+ prev_ngram_tuple = tuple(ngram[:-1])
415
+ generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
416
+
417
+ def _get_generated_ngrams(hypo_idx):
418
+ # Before decoding the next token, prevent decoding of ngrams that have already appeared
419
+ start_idx = cur_len + 1 - self.ngram_size
420
+ ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist())
421
+ return generated_ngrams[hypo_idx].get(ngram_idx, [])
422
+
423
+ banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
424
+
425
+ return banned_tokens
426
+
427
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
428
+ # TODO (joao): enable XLA on this logits processor. See discussion and attempts in
429
+ # https://github.com/huggingface/transformers/pull/16974
430
+ if not tf.executing_eagerly():
431
+ raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.")
432
+
433
+ batch_size, vocab_size = scores.shape
434
+ banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len)
435
+
436
+ # create banned_tokens boolean mask
437
+ banned_tokens_indices_mask = []
438
+ for banned_tokens_slice in banned_tokens:
439
+ banned_tokens_indices_mask.append(
440
+ [True if token in banned_tokens_slice else False for token in range(vocab_size)]
441
+ )
442
+
443
+ scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores)
444
+
445
+ return scores
446
+
447
+
448
+ class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor):
449
+ r"""
450
+ [`TFLogitsProcessor`] that enforces the specified token as the first generated token.
451
+
452
+ Args:
453
+ bos_token_id (`int`):
454
+ The id of the token to force as the first generated token.
455
+ """
456
+
457
+ def __init__(self, bos_token_id: int):
458
+ if bos_token_id < 0:
459
+ raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}")
460
+ self.bos_token_id = bos_token_id
461
+
462
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
463
+ if cur_len == 1:
464
+ batch_size, num_tokens = scores.shape
465
+ # sets the score to 0 in the bos_token_id column
466
+ scores = tf.zeros((batch_size, 1))
467
+ # sets the score to -inf everywhere else
468
+ if self.bos_token_id > 0:
469
+ scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1)
470
+ if self.bos_token_id < (num_tokens - 1):
471
+ scores = tf.concat(
472
+ (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))),
473
+ axis=-1,
474
+ )
475
+ return scores
476
+
477
+
478
+ class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
479
+ r"""
480
+ [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
481
+
482
+ Args:
483
+ max_length (`int`):
484
+ The maximum length of the sequence to be generated.
485
+ eos_token_id (`int`):
486
+ The id of the token to force as the last generated token when `max_length` is reached.
487
+ """
488
+
489
+ def __init__(self, max_length: int, eos_token_id: int):
490
+ self.max_length = max_length
491
+ if eos_token_id < 0:
492
+ raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}")
493
+ self.eos_token_id = eos_token_id
494
+
495
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
496
+ if cur_len == self.max_length - 1:
497
+ batch_size, num_tokens = scores.shape
498
+ # sets the score to 0 in the eos_token_id column
499
+ scores = tf.zeros((batch_size, 1))
500
+ # sets the score to -inf everywhere else
501
+ if self.eos_token_id > 0:
502
+ scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1)
503
+ if self.eos_token_id < (num_tokens - 1):
504
+ scores = tf.concat(
505
+ (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))),
506
+ axis=-1,
507
+ )
508
+ return scores
509
+
510
+
511
+ class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
512
+ r"""
513
+ [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
514
+ generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not
515
+ sampled at the beginning of the generation.
516
+ """
517
+
518
+ def __init__(self, begin_suppress_tokens, begin_index):
519
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
520
+ self.begin_index = begin_index
521
+
522
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
523
+ scores = tf.cond(
524
+ tf.equal(cur_len, self.begin_index),
525
+ lambda: tf.tensor_scatter_nd_update(
526
+ scores,
527
+ indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens],
528
+ updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))],
529
+ ),
530
+ lambda: scores,
531
+ )
532
+ return scores
533
+
534
+
535
+ class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):
536
+ r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
537
+ are not sampled."""
538
+
539
+ def __init__(self, suppress_tokens):
540
+ self.suppress_tokens = list(suppress_tokens)
541
+
542
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
543
+ scores = tf.tensor_scatter_nd_update(
544
+ scores,
545
+ indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens],
546
+ updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))],
547
+ )
548
+ return scores
549
+
550
+
551
+ class TFForceTokensLogitsProcessor(TFLogitsProcessor):
552
+ r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
553
+ indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to
554
+ `-inf` so that they are sampled at their corresponding index."""
555
+
556
+ def __init__(self, force_token_map: List[List[int]]):
557
+ force_token_map = dict(force_token_map)
558
+ # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
559
+ # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
560
+ # Indexes without forced tokens will have a negative value.
561
+ force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
562
+ for index, token in force_token_map.items():
563
+ if token is not None:
564
+ force_token_array[index] = token
565
+ self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)
566
+
567
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
568
+ def _force_token(generation_idx):
569
+ batch_size = scores.shape[0]
570
+ current_token = self.force_token_array[generation_idx]
571
+
572
+ new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf")
573
+ indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1)
574
+ updates = tf.zeros((batch_size,), dtype=scores.dtype)
575
+ new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates)
576
+ return new_scores
577
+
578
+ scores = tf.cond(
579
+ tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]),
580
+ # If the current length is greater than or equal to the length of force_token_array, the processor does nothing.
581
+ lambda: tf.identity(scores),
582
+ # Otherwise, it may force a certain token.
583
+ lambda: tf.cond(
584
+ tf.greater_equal(self.force_token_array[cur_len], 0),
585
+ # Only valid (positive) tokens are forced
586
+ lambda: _force_token(cur_len),
587
+ # Otherwise, the processor does nothing.
588
+ lambda: scores,
589
+ ),
590
+ )
591
+ return scores
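Finally, a hedged end-to-end sketch (not from the diff) of how these TF processors can be chained through `TFLogitsProcessorList` outside of `generate`; the shapes, ids, and hyper-parameters below are made up.

```python
import tensorflow as tf

from transformers.generation.tf_logits_process import (
    TFLogitsProcessorList,
    TFMinLengthLogitsProcessor,
    TFTemperatureLogitsWarper,
    TFTopKLogitsWarper,
)

batch_size, vocab_size, eos_token_id = 2, 10, 9
processors = TFLogitsProcessorList([
    TFMinLengthLogitsProcessor(min_length=5, eos_token_id=eos_token_id),
    TFTemperatureLogitsWarper(temperature=0.7),
    TFTopKLogitsWarper(top_k=3),
])

input_ids = tf.zeros((batch_size, 3), dtype=tf.int32)  # 3 valid tokens so far
scores = tf.random.normal((batch_size, vocab_size))

# cur_len < min_length, so the EOS column is pushed to -inf before warping.
processed = processors(input_ids, scores, cur_len=3)
print(processed.shape)  # (2, 10)
```

Each processor and warper keeps the `(batch_size, vocab_size)` shape of `scores`, which is what makes them freely composable in a single list.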