applied-ai-018 committed
Commit a29188b · verified · 1 Parent(s): 8cd197d

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__init__.py +23 -0
  4. llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__init__.py +98 -0
  8. llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py +780 -0
  11. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__init__.py +18 -0
  12. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/glue.py +643 -0
  18. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/squad.py +845 -0
  19. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/utils.py +349 -0
  20. llmeval-env/lib/python3.10/site-packages/transformers/data/processors/xnli.py +97 -0
  21. llmeval-env/lib/python3.10/site-packages/transformers/generation/__init__.py +310 -0
  22. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_constraints.py +521 -0
  36. llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_search.py +1005 -0
  37. llmeval-env/lib/python3.10/site-packages/transformers/generation/candidate_generator.py +425 -0
  38. llmeval-env/lib/python3.10/site-packages/transformers/generation/configuration_utils.py +1092 -0
  39. llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py +544 -0
  40. llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_utils.py +1022 -0
  41. llmeval-env/lib/python3.10/site-packages/transformers/generation/logits_process.py +0 -0
  42. llmeval-env/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py +189 -0
  43. llmeval-env/lib/python3.10/site-packages/transformers/generation/streamers.py +227 -0
  44. llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py +591 -0
  45. llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_utils.py +0 -0
  46. llmeval-env/lib/python3.10/site-packages/transformers/generation/utils.py +0 -0
  47. llmeval-env/lib/python3.10/site-packages/transformers/onnx/__init__.py +49 -0
  48. llmeval-env/lib/python3.10/site-packages/transformers/onnx/__main__.py +242 -0
  49. llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc ADDED
Binary file (46.6 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__init__.py ADDED
@@ -0,0 +1,23 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .glue import GlueDataset, GlueDataTrainingArguments
+ from .language_modeling import (
+     LineByLineTextDataset,
+     LineByLineWithRefDataset,
+     LineByLineWithSOPTextDataset,
+     TextDataset,
+     TextDatasetForNextSentencePrediction,
+ )
+ from .squad import SquadDataset, SquadDataTrainingArguments
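For orientation, a minimal usage sketch for the dataset helpers re-exported above (the checkpoint name, task and data directory are placeholders, not part of this commit; these classes are legacy and 🤗 Datasets is the recommended replacement):

from transformers import AutoTokenizer
from transformers.data.datasets import GlueDataset, GlueDataTrainingArguments

# Placeholder checkpoint and data directory; assumes the MRPC TSV files are already downloaded.
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128)
train_dataset = GlueDataset(args, tokenizer=tokenizer)  # legacy helper; emits a FutureWarning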
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (560 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc ADDED
Binary file (4.87 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc ADDED
Binary file (13 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__init__.py ADDED
@@ -0,0 +1,98 @@
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ import warnings
+
+ from ...utils import is_sklearn_available, requires_backends
+
+
+ if is_sklearn_available():
+     from scipy.stats import pearsonr, spearmanr
+     from sklearn.metrics import f1_score, matthews_corrcoef
+
+
+ DEPRECATION_WARNING = (
+     "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
+     "library. You can have a look at this example script for pointers: "
+     "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
+ )
+
+
+ def simple_accuracy(preds, labels):
+     warnings.warn(DEPRECATION_WARNING, FutureWarning)
+     requires_backends(simple_accuracy, "sklearn")
+     return (preds == labels).mean()
+
+
+ def acc_and_f1(preds, labels):
+     warnings.warn(DEPRECATION_WARNING, FutureWarning)
+     requires_backends(acc_and_f1, "sklearn")
+     acc = simple_accuracy(preds, labels)
+     f1 = f1_score(y_true=labels, y_pred=preds)
+     return {
+         "acc": acc,
+         "f1": f1,
+         "acc_and_f1": (acc + f1) / 2,
+     }
+
+
+ def pearson_and_spearman(preds, labels):
+     warnings.warn(DEPRECATION_WARNING, FutureWarning)
+     requires_backends(pearson_and_spearman, "sklearn")
+     pearson_corr = pearsonr(preds, labels)[0]
+     spearman_corr = spearmanr(preds, labels)[0]
+     return {
+         "pearson": pearson_corr,
+         "spearmanr": spearman_corr,
+         "corr": (pearson_corr + spearman_corr) / 2,
+     }
+
+
+ def glue_compute_metrics(task_name, preds, labels):
+     warnings.warn(DEPRECATION_WARNING, FutureWarning)
+     requires_backends(glue_compute_metrics, "sklearn")
+     assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
+     if task_name == "cola":
+         return {"mcc": matthews_corrcoef(labels, preds)}
+     elif task_name == "sst-2":
+         return {"acc": simple_accuracy(preds, labels)}
+     elif task_name == "mrpc":
+         return acc_and_f1(preds, labels)
+     elif task_name == "sts-b":
+         return pearson_and_spearman(preds, labels)
+     elif task_name == "qqp":
+         return acc_and_f1(preds, labels)
+     elif task_name == "mnli":
+         return {"mnli/acc": simple_accuracy(preds, labels)}
+     elif task_name == "mnli-mm":
+         return {"mnli-mm/acc": simple_accuracy(preds, labels)}
+     elif task_name == "qnli":
+         return {"acc": simple_accuracy(preds, labels)}
+     elif task_name == "rte":
+         return {"acc": simple_accuracy(preds, labels)}
+     elif task_name == "wnli":
+         return {"acc": simple_accuracy(preds, labels)}
+     elif task_name == "hans":
+         return {"acc": simple_accuracy(preds, labels)}
+     else:
+         raise KeyError(task_name)
+
+
+ def xnli_compute_metrics(task_name, preds, labels):
+     warnings.warn(DEPRECATION_WARNING, FutureWarning)
+     requires_backends(xnli_compute_metrics, "sklearn")
+     if len(preds) != len(labels):
+         raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
+     if task_name == "xnli":
+         return {"acc": simple_accuracy(preds, labels)}
+     else:
+         raise KeyError(task_name)
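As a hedged usage sketch (not part of the diff itself): the helpers above expect NumPy arrays of label ids and require scikit-learn to be installed, e.g.

import numpy as np
from transformers.data.metrics import glue_compute_metrics

preds = np.array([1, 0, 1, 1])   # hypothetical predicted label ids
labels = np.array([1, 0, 0, 1])  # hypothetical gold label ids
# For MRPC this returns accuracy, F1 and their average.
print(glue_compute_metrics("mrpc", preds, labels))  # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}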
llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.55 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc ADDED
Binary file (16.1 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py ADDED
@@ -0,0 +1,780 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ """
15
+ Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to
16
+ update `find_best_threshold` scripts for SQuAD V2.0
17
+
18
+ In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an
19
+ additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted
20
+ probability that a question is unanswerable.
21
+ """
22
+
23
+
24
+ import collections
25
+ import json
26
+ import math
27
+ import re
28
+ import string
29
+
30
+ from ...models.bert import BasicTokenizer
31
+ from ...utils import logging
32
+
33
+
34
+ logger = logging.get_logger(__name__)
35
+
36
+
37
+ def normalize_answer(s):
38
+ """Lower text and remove punctuation, articles and extra whitespace."""
39
+
40
+ def remove_articles(text):
41
+ regex = re.compile(r"\b(a|an|the)\b", re.UNICODE)
42
+ return re.sub(regex, " ", text)
43
+
44
+ def white_space_fix(text):
45
+ return " ".join(text.split())
46
+
47
+ def remove_punc(text):
48
+ exclude = set(string.punctuation)
49
+ return "".join(ch for ch in text if ch not in exclude)
50
+
51
+ def lower(text):
52
+ return text.lower()
53
+
54
+ return white_space_fix(remove_articles(remove_punc(lower(s))))
55
+
56
+
57
+ def get_tokens(s):
58
+ if not s:
59
+ return []
60
+ return normalize_answer(s).split()
61
+
62
+
63
+ def compute_exact(a_gold, a_pred):
64
+ return int(normalize_answer(a_gold) == normalize_answer(a_pred))
65
+
66
+
67
+ def compute_f1(a_gold, a_pred):
68
+ gold_toks = get_tokens(a_gold)
69
+ pred_toks = get_tokens(a_pred)
70
+ common = collections.Counter(gold_toks) & collections.Counter(pred_toks)
71
+ num_same = sum(common.values())
72
+ if len(gold_toks) == 0 or len(pred_toks) == 0:
73
+ # If either is no-answer, then F1 is 1 if they agree, 0 otherwise
74
+ return int(gold_toks == pred_toks)
75
+ if num_same == 0:
76
+ return 0
77
+ precision = 1.0 * num_same / len(pred_toks)
78
+ recall = 1.0 * num_same / len(gold_toks)
79
+ f1 = (2 * precision * recall) / (precision + recall)
80
+ return f1
81
+
82
+
83
+ def get_raw_scores(examples, preds):
84
+ """
85
+ Computes the exact and f1 scores from the examples and the model predictions
86
+ """
87
+ exact_scores = {}
88
+ f1_scores = {}
89
+
90
+ for example in examples:
91
+ qas_id = example.qas_id
92
+ gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])]
93
+
94
+ if not gold_answers:
95
+ # For unanswerable questions, only correct answer is empty string
96
+ gold_answers = [""]
97
+
98
+ if qas_id not in preds:
99
+ print(f"Missing prediction for {qas_id}")
100
+ continue
101
+
102
+ prediction = preds[qas_id]
103
+ exact_scores[qas_id] = max(compute_exact(a, prediction) for a in gold_answers)
104
+ f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers)
105
+
106
+ return exact_scores, f1_scores
107
+
108
+
109
+ def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh):
110
+ new_scores = {}
111
+ for qid, s in scores.items():
112
+ pred_na = na_probs[qid] > na_prob_thresh
113
+ if pred_na:
114
+ new_scores[qid] = float(not qid_to_has_ans[qid])
115
+ else:
116
+ new_scores[qid] = s
117
+ return new_scores
118
+
119
+
120
+ def make_eval_dict(exact_scores, f1_scores, qid_list=None):
121
+ if not qid_list:
122
+ total = len(exact_scores)
123
+ return collections.OrderedDict(
124
+ [
125
+ ("exact", 100.0 * sum(exact_scores.values()) / total),
126
+ ("f1", 100.0 * sum(f1_scores.values()) / total),
127
+ ("total", total),
128
+ ]
129
+ )
130
+ else:
131
+ total = len(qid_list)
132
+ return collections.OrderedDict(
133
+ [
134
+ ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total),
135
+ ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total),
136
+ ("total", total),
137
+ ]
138
+ )
139
+
140
+
141
+ def merge_eval(main_eval, new_eval, prefix):
142
+ for k in new_eval:
143
+ main_eval[f"{prefix}_{k}"] = new_eval[k]
144
+
145
+
146
+ def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans):
147
+ num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
148
+ cur_score = num_no_ans
149
+ best_score = cur_score
150
+ best_thresh = 0.0
151
+ qid_list = sorted(na_probs, key=lambda k: na_probs[k])
152
+ for i, qid in enumerate(qid_list):
153
+ if qid not in scores:
154
+ continue
155
+ if qid_to_has_ans[qid]:
156
+ diff = scores[qid]
157
+ else:
158
+ if preds[qid]:
159
+ diff = -1
160
+ else:
161
+ diff = 0
162
+ cur_score += diff
163
+ if cur_score > best_score:
164
+ best_score = cur_score
165
+ best_thresh = na_probs[qid]
166
+
167
+ has_ans_score, has_ans_cnt = 0, 0
168
+ for qid in qid_list:
169
+ if not qid_to_has_ans[qid]:
170
+ continue
171
+ has_ans_cnt += 1
172
+
173
+ if qid not in scores:
174
+ continue
175
+ has_ans_score += scores[qid]
176
+
177
+ return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt
178
+
179
+
180
+ def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
181
+ best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans)
182
+ best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans)
183
+ main_eval["best_exact"] = best_exact
184
+ main_eval["best_exact_thresh"] = exact_thresh
185
+ main_eval["best_f1"] = best_f1
186
+ main_eval["best_f1_thresh"] = f1_thresh
187
+ main_eval["has_ans_exact"] = has_ans_exact
188
+ main_eval["has_ans_f1"] = has_ans_f1
189
+
190
+
191
+ def find_best_thresh(preds, scores, na_probs, qid_to_has_ans):
192
+ num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k])
193
+ cur_score = num_no_ans
194
+ best_score = cur_score
195
+ best_thresh = 0.0
196
+ qid_list = sorted(na_probs, key=lambda k: na_probs[k])
197
+ for _, qid in enumerate(qid_list):
198
+ if qid not in scores:
199
+ continue
200
+ if qid_to_has_ans[qid]:
201
+ diff = scores[qid]
202
+ else:
203
+ if preds[qid]:
204
+ diff = -1
205
+ else:
206
+ diff = 0
207
+ cur_score += diff
208
+ if cur_score > best_score:
209
+ best_score = cur_score
210
+ best_thresh = na_probs[qid]
211
+ return 100.0 * best_score / len(scores), best_thresh
212
+
213
+
214
+ def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans):
215
+ best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans)
216
+ best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans)
217
+
218
+ main_eval["best_exact"] = best_exact
219
+ main_eval["best_exact_thresh"] = exact_thresh
220
+ main_eval["best_f1"] = best_f1
221
+ main_eval["best_f1_thresh"] = f1_thresh
222
+
223
+
224
+ def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0):
225
+ qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples}
226
+ has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer]
227
+ no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer]
228
+
229
+ if no_answer_probs is None:
230
+ no_answer_probs = {k: 0.0 for k in preds}
231
+
232
+ exact, f1 = get_raw_scores(examples, preds)
233
+
234
+ exact_threshold = apply_no_ans_threshold(
235
+ exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold
236
+ )
237
+ f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold)
238
+
239
+ evaluation = make_eval_dict(exact_threshold, f1_threshold)
240
+
241
+ if has_answer_qids:
242
+ has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids)
243
+ merge_eval(evaluation, has_ans_eval, "HasAns")
244
+
245
+ if no_answer_qids:
246
+ no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids)
247
+ merge_eval(evaluation, no_ans_eval, "NoAns")
248
+
249
+ if no_answer_probs:
250
+ find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer)
251
+
252
+ return evaluation
253
+
254
+
255
+ def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False):
256
+ """Project the tokenized prediction back to the original text."""
257
+
258
+ # When we created the data, we kept track of the alignment between original
259
+ # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
260
+ # now `orig_text` contains the span of our original text corresponding to the
261
+ # span that we predicted.
262
+ #
263
+ # However, `orig_text` may contain extra characters that we don't want in
264
+ # our prediction.
265
+ #
266
+ # For example, let's say:
267
+ # pred_text = steve smith
268
+ # orig_text = Steve Smith's
269
+ #
270
+ # We don't want to return `orig_text` because it contains the extra "'s".
271
+ #
272
+ # We don't want to return `pred_text` because it's already been normalized
273
+ # (the SQuAD eval script also does punctuation stripping/lower casing but
274
+ # our tokenizer does additional normalization like stripping accent
275
+ # characters).
276
+ #
277
+ # What we really want to return is "Steve Smith".
278
+ #
279
+ # Therefore, we have to apply a semi-complicated alignment heuristic between
280
+ # `pred_text` and `orig_text` to get a character-to-character alignment. This
281
+ # can fail in certain cases in which case we just return `orig_text`.
282
+
283
+ def _strip_spaces(text):
284
+ ns_chars = []
285
+ ns_to_s_map = collections.OrderedDict()
286
+ for i, c in enumerate(text):
287
+ if c == " ":
288
+ continue
289
+ ns_to_s_map[len(ns_chars)] = i
290
+ ns_chars.append(c)
291
+ ns_text = "".join(ns_chars)
292
+ return (ns_text, ns_to_s_map)
293
+
294
+ # We first tokenize `orig_text`, strip whitespace from the result
295
+ # and `pred_text`, and check if they are the same length. If they are
296
+ # NOT the same length, the heuristic has failed. If they are the same
297
+ # length, we assume the characters are one-to-one aligned.
298
+ tokenizer = BasicTokenizer(do_lower_case=do_lower_case)
299
+
300
+ tok_text = " ".join(tokenizer.tokenize(orig_text))
301
+
302
+ start_position = tok_text.find(pred_text)
303
+ if start_position == -1:
304
+ if verbose_logging:
305
+ logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'")
306
+ return orig_text
307
+ end_position = start_position + len(pred_text) - 1
308
+
309
+ (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
310
+ (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
311
+
312
+ if len(orig_ns_text) != len(tok_ns_text):
313
+ if verbose_logging:
314
+ logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'")
315
+ return orig_text
316
+
317
+ # We then project the characters in `pred_text` back to `orig_text` using
318
+ # the character-to-character alignment.
319
+ tok_s_to_ns_map = {}
320
+ for i, tok_index in tok_ns_to_s_map.items():
321
+ tok_s_to_ns_map[tok_index] = i
322
+
323
+ orig_start_position = None
324
+ if start_position in tok_s_to_ns_map:
325
+ ns_start_position = tok_s_to_ns_map[start_position]
326
+ if ns_start_position in orig_ns_to_s_map:
327
+ orig_start_position = orig_ns_to_s_map[ns_start_position]
328
+
329
+ if orig_start_position is None:
330
+ if verbose_logging:
331
+ logger.info("Couldn't map start position")
332
+ return orig_text
333
+
334
+ orig_end_position = None
335
+ if end_position in tok_s_to_ns_map:
336
+ ns_end_position = tok_s_to_ns_map[end_position]
337
+ if ns_end_position in orig_ns_to_s_map:
338
+ orig_end_position = orig_ns_to_s_map[ns_end_position]
339
+
340
+ if orig_end_position is None:
341
+ if verbose_logging:
342
+ logger.info("Couldn't map end position")
343
+ return orig_text
344
+
345
+ output_text = orig_text[orig_start_position : (orig_end_position + 1)]
346
+ return output_text
347
+
348
+
349
+ def _get_best_indexes(logits, n_best_size):
350
+ """Get the n-best logits from a list."""
351
+ index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
352
+
353
+ best_indexes = []
354
+ for i in range(len(index_and_score)):
355
+ if i >= n_best_size:
356
+ break
357
+ best_indexes.append(index_and_score[i][0])
358
+ return best_indexes
359
+
360
+
361
+ def _compute_softmax(scores):
362
+ """Compute softmax probability over raw logits."""
363
+ if not scores:
364
+ return []
365
+
366
+ max_score = None
367
+ for score in scores:
368
+ if max_score is None or score > max_score:
369
+ max_score = score
370
+
371
+ exp_scores = []
372
+ total_sum = 0.0
373
+ for score in scores:
374
+ x = math.exp(score - max_score)
375
+ exp_scores.append(x)
376
+ total_sum += x
377
+
378
+ probs = []
379
+ for score in exp_scores:
380
+ probs.append(score / total_sum)
381
+ return probs
382
+
383
+
384
+ def compute_predictions_logits(
385
+ all_examples,
386
+ all_features,
387
+ all_results,
388
+ n_best_size,
389
+ max_answer_length,
390
+ do_lower_case,
391
+ output_prediction_file,
392
+ output_nbest_file,
393
+ output_null_log_odds_file,
394
+ verbose_logging,
395
+ version_2_with_negative,
396
+ null_score_diff_threshold,
397
+ tokenizer,
398
+ ):
399
+ """Write final predictions to the json file and log-odds of null if needed."""
400
+ if output_prediction_file:
401
+ logger.info(f"Writing predictions to: {output_prediction_file}")
402
+ if output_nbest_file:
403
+ logger.info(f"Writing nbest to: {output_nbest_file}")
404
+ if output_null_log_odds_file and version_2_with_negative:
405
+ logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}")
406
+
407
+ example_index_to_features = collections.defaultdict(list)
408
+ for feature in all_features:
409
+ example_index_to_features[feature.example_index].append(feature)
410
+
411
+ unique_id_to_result = {}
412
+ for result in all_results:
413
+ unique_id_to_result[result.unique_id] = result
414
+
415
+ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
416
+ "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_logit", "end_logit"]
417
+ )
418
+
419
+ all_predictions = collections.OrderedDict()
420
+ all_nbest_json = collections.OrderedDict()
421
+ scores_diff_json = collections.OrderedDict()
422
+
423
+ for example_index, example in enumerate(all_examples):
424
+ features = example_index_to_features[example_index]
425
+
426
+ prelim_predictions = []
427
+ # keep track of the minimum score of null start+end of position 0
428
+ score_null = 1000000 # large and positive
429
+ min_null_feature_index = 0 # the paragraph slice with min null score
430
+ null_start_logit = 0 # the start logit at the slice with min null score
431
+ null_end_logit = 0 # the end logit at the slice with min null score
432
+ for feature_index, feature in enumerate(features):
433
+ result = unique_id_to_result[feature.unique_id]
434
+ start_indexes = _get_best_indexes(result.start_logits, n_best_size)
435
+ end_indexes = _get_best_indexes(result.end_logits, n_best_size)
436
+ # if we could have irrelevant answers, get the min score of irrelevant
437
+ if version_2_with_negative:
438
+ feature_null_score = result.start_logits[0] + result.end_logits[0]
439
+ if feature_null_score < score_null:
440
+ score_null = feature_null_score
441
+ min_null_feature_index = feature_index
442
+ null_start_logit = result.start_logits[0]
443
+ null_end_logit = result.end_logits[0]
444
+ for start_index in start_indexes:
445
+ for end_index in end_indexes:
446
+ # We could hypothetically create invalid predictions, e.g., predict
447
+ # that the start of the span is in the question. We throw out all
448
+ # invalid predictions.
449
+ if start_index >= len(feature.tokens):
450
+ continue
451
+ if end_index >= len(feature.tokens):
452
+ continue
453
+ if start_index not in feature.token_to_orig_map:
454
+ continue
455
+ if end_index not in feature.token_to_orig_map:
456
+ continue
457
+ if not feature.token_is_max_context.get(start_index, False):
458
+ continue
459
+ if end_index < start_index:
460
+ continue
461
+ length = end_index - start_index + 1
462
+ if length > max_answer_length:
463
+ continue
464
+ prelim_predictions.append(
465
+ _PrelimPrediction(
466
+ feature_index=feature_index,
467
+ start_index=start_index,
468
+ end_index=end_index,
469
+ start_logit=result.start_logits[start_index],
470
+ end_logit=result.end_logits[end_index],
471
+ )
472
+ )
473
+ if version_2_with_negative:
474
+ prelim_predictions.append(
475
+ _PrelimPrediction(
476
+ feature_index=min_null_feature_index,
477
+ start_index=0,
478
+ end_index=0,
479
+ start_logit=null_start_logit,
480
+ end_logit=null_end_logit,
481
+ )
482
+ )
483
+ prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True)
484
+
485
+ _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
486
+ "NbestPrediction", ["text", "start_logit", "end_logit"]
487
+ )
488
+
489
+ seen_predictions = {}
490
+ nbest = []
491
+ for pred in prelim_predictions:
492
+ if len(nbest) >= n_best_size:
493
+ break
494
+ feature = features[pred.feature_index]
495
+ if pred.start_index > 0: # this is a non-null prediction
496
+ tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
497
+ orig_doc_start = feature.token_to_orig_map[pred.start_index]
498
+ orig_doc_end = feature.token_to_orig_map[pred.end_index]
499
+ orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
500
+
501
+ tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
502
+
503
+ # tok_text = " ".join(tok_tokens)
504
+ #
505
+ # # De-tokenize WordPieces that have been split off.
506
+ # tok_text = tok_text.replace(" ##", "")
507
+ # tok_text = tok_text.replace("##", "")
508
+
509
+ # Clean whitespace
510
+ tok_text = tok_text.strip()
511
+ tok_text = " ".join(tok_text.split())
512
+ orig_text = " ".join(orig_tokens)
513
+
514
+ final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
515
+ if final_text in seen_predictions:
516
+ continue
517
+
518
+ seen_predictions[final_text] = True
519
+ else:
520
+ final_text = ""
521
+ seen_predictions[final_text] = True
522
+
523
+ nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit))
524
+ # if we didn't include the empty option in the n-best, include it
525
+ if version_2_with_negative:
526
+ if "" not in seen_predictions:
527
+ nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit))
528
+
529
+ # In very rare edge cases we could only have single null prediction.
530
+ # So we just create a nonce prediction in this case to avoid failure.
531
+ if len(nbest) == 1:
532
+ nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
533
+
534
+ # In very rare edge cases we could have no valid predictions. So we
535
+ # just create a nonce prediction in this case to avoid failure.
536
+ if not nbest:
537
+ nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
538
+
539
+ if len(nbest) < 1:
540
+ raise ValueError("No valid predictions")
541
+
542
+ total_scores = []
543
+ best_non_null_entry = None
544
+ for entry in nbest:
545
+ total_scores.append(entry.start_logit + entry.end_logit)
546
+ if not best_non_null_entry:
547
+ if entry.text:
548
+ best_non_null_entry = entry
549
+
550
+ probs = _compute_softmax(total_scores)
551
+
552
+ nbest_json = []
553
+ for i, entry in enumerate(nbest):
554
+ output = collections.OrderedDict()
555
+ output["text"] = entry.text
556
+ output["probability"] = probs[i]
557
+ output["start_logit"] = entry.start_logit
558
+ output["end_logit"] = entry.end_logit
559
+ nbest_json.append(output)
560
+
561
+ if len(nbest_json) < 1:
562
+ raise ValueError("No valid predictions")
563
+
564
+ if not version_2_with_negative:
565
+ all_predictions[example.qas_id] = nbest_json[0]["text"]
566
+ else:
567
+ # predict "" iff the null score - the score of best non-null > threshold
568
+ score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit)
569
+ scores_diff_json[example.qas_id] = score_diff
570
+ if score_diff > null_score_diff_threshold:
571
+ all_predictions[example.qas_id] = ""
572
+ else:
573
+ all_predictions[example.qas_id] = best_non_null_entry.text
574
+ all_nbest_json[example.qas_id] = nbest_json
575
+
576
+ if output_prediction_file:
577
+ with open(output_prediction_file, "w") as writer:
578
+ writer.write(json.dumps(all_predictions, indent=4) + "\n")
579
+
580
+ if output_nbest_file:
581
+ with open(output_nbest_file, "w") as writer:
582
+ writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
583
+
584
+ if output_null_log_odds_file and version_2_with_negative:
585
+ with open(output_null_log_odds_file, "w") as writer:
586
+ writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
587
+
588
+ return all_predictions
589
+
590
+
591
+ def compute_predictions_log_probs(
592
+ all_examples,
593
+ all_features,
594
+ all_results,
595
+ n_best_size,
596
+ max_answer_length,
597
+ output_prediction_file,
598
+ output_nbest_file,
599
+ output_null_log_odds_file,
600
+ start_n_top,
601
+ end_n_top,
602
+ version_2_with_negative,
603
+ tokenizer,
604
+ verbose_logging,
605
+ ):
606
+ """
607
+ XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of
608
+ null if needed.
609
+
610
+ Requires utils_squad_evaluate.py
611
+ """
612
+ _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
613
+ "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"]
614
+ )
615
+
616
+ _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
617
+ "NbestPrediction", ["text", "start_log_prob", "end_log_prob"]
618
+ )
619
+
620
+ logger.info(f"Writing predictions to: {output_prediction_file}")
621
+
622
+ example_index_to_features = collections.defaultdict(list)
623
+ for feature in all_features:
624
+ example_index_to_features[feature.example_index].append(feature)
625
+
626
+ unique_id_to_result = {}
627
+ for result in all_results:
628
+ unique_id_to_result[result.unique_id] = result
629
+
630
+ all_predictions = collections.OrderedDict()
631
+ all_nbest_json = collections.OrderedDict()
632
+ scores_diff_json = collections.OrderedDict()
633
+
634
+ for example_index, example in enumerate(all_examples):
635
+ features = example_index_to_features[example_index]
636
+
637
+ prelim_predictions = []
638
+ # keep track of the minimum score of null start+end of position 0
639
+ score_null = 1000000 # large and positive
640
+
641
+ for feature_index, feature in enumerate(features):
642
+ result = unique_id_to_result[feature.unique_id]
643
+
644
+ cur_null_score = result.cls_logits
645
+
646
+ # if we could have irrelevant answers, get the min score of irrelevant
647
+ score_null = min(score_null, cur_null_score)
648
+
649
+ for i in range(start_n_top):
650
+ for j in range(end_n_top):
651
+ start_log_prob = result.start_logits[i]
652
+ start_index = result.start_top_index[i]
653
+
654
+ j_index = i * end_n_top + j
655
+
656
+ end_log_prob = result.end_logits[j_index]
657
+ end_index = result.end_top_index[j_index]
658
+
659
+ # We could hypothetically create invalid predictions, e.g., predict
660
+ # that the start of the span is in the question. We throw out all
661
+ # invalid predictions.
662
+ if start_index >= feature.paragraph_len - 1:
663
+ continue
664
+ if end_index >= feature.paragraph_len - 1:
665
+ continue
666
+
667
+ if not feature.token_is_max_context.get(start_index, False):
668
+ continue
669
+ if end_index < start_index:
670
+ continue
671
+ length = end_index - start_index + 1
672
+ if length > max_answer_length:
673
+ continue
674
+
675
+ prelim_predictions.append(
676
+ _PrelimPrediction(
677
+ feature_index=feature_index,
678
+ start_index=start_index,
679
+ end_index=end_index,
680
+ start_log_prob=start_log_prob,
681
+ end_log_prob=end_log_prob,
682
+ )
683
+ )
684
+
685
+ prelim_predictions = sorted(
686
+ prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True
687
+ )
688
+
689
+ seen_predictions = {}
690
+ nbest = []
691
+ for pred in prelim_predictions:
692
+ if len(nbest) >= n_best_size:
693
+ break
694
+ feature = features[pred.feature_index]
695
+
696
+ # XLNet un-tokenizer
697
+ # Let's keep it simple for now and see if we need all this later.
698
+ #
699
+ # tok_start_to_orig_index = feature.tok_start_to_orig_index
700
+ # tok_end_to_orig_index = feature.tok_end_to_orig_index
701
+ # start_orig_pos = tok_start_to_orig_index[pred.start_index]
702
+ # end_orig_pos = tok_end_to_orig_index[pred.end_index]
703
+ # paragraph_text = example.paragraph_text
704
+ # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip()
705
+
706
+ # Previously used Bert untokenizer
707
+ tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)]
708
+ orig_doc_start = feature.token_to_orig_map[pred.start_index]
709
+ orig_doc_end = feature.token_to_orig_map[pred.end_index]
710
+ orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)]
711
+ tok_text = tokenizer.convert_tokens_to_string(tok_tokens)
712
+
713
+ # Clean whitespace
714
+ tok_text = tok_text.strip()
715
+ tok_text = " ".join(tok_text.split())
716
+ orig_text = " ".join(orig_tokens)
717
+
718
+ if hasattr(tokenizer, "do_lower_case"):
719
+ do_lower_case = tokenizer.do_lower_case
720
+ else:
721
+ do_lower_case = tokenizer.do_lowercase_and_remove_accent
722
+
723
+ final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging)
724
+
725
+ if final_text in seen_predictions:
726
+ continue
727
+
728
+ seen_predictions[final_text] = True
729
+
730
+ nbest.append(
731
+ _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob)
732
+ )
733
+
734
+ # In very rare edge cases we could have no valid predictions. So we
735
+ # just create a nonce prediction in this case to avoid failure.
736
+ if not nbest:
737
+ nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6))
738
+
739
+ total_scores = []
740
+ best_non_null_entry = None
741
+ for entry in nbest:
742
+ total_scores.append(entry.start_log_prob + entry.end_log_prob)
743
+ if not best_non_null_entry:
744
+ best_non_null_entry = entry
745
+
746
+ probs = _compute_softmax(total_scores)
747
+
748
+ nbest_json = []
749
+ for i, entry in enumerate(nbest):
750
+ output = collections.OrderedDict()
751
+ output["text"] = entry.text
752
+ output["probability"] = probs[i]
753
+ output["start_log_prob"] = entry.start_log_prob
754
+ output["end_log_prob"] = entry.end_log_prob
755
+ nbest_json.append(output)
756
+
757
+ if len(nbest_json) < 1:
758
+ raise ValueError("No valid predictions")
759
+ if best_non_null_entry is None:
760
+ raise ValueError("No valid predictions")
761
+
762
+ score_diff = score_null
763
+ scores_diff_json[example.qas_id] = score_diff
764
+ # note(zhiliny): always predict best_non_null_entry
765
+ # and the evaluation script will search for the best threshold
766
+ all_predictions[example.qas_id] = best_non_null_entry.text
767
+
768
+ all_nbest_json[example.qas_id] = nbest_json
769
+
770
+ with open(output_prediction_file, "w") as writer:
771
+ writer.write(json.dumps(all_predictions, indent=4) + "\n")
772
+
773
+ with open(output_nbest_file, "w") as writer:
774
+ writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
775
+
776
+ if version_2_with_negative:
777
+ with open(output_null_log_odds_file, "w") as writer:
778
+ writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
779
+
780
+ return all_predictions
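To illustrate the behaviour of the answer-level metrics defined above (a small sketch, independent of this commit):

from transformers.data.metrics.squad_metrics import compute_exact, compute_f1

# Exact match compares answers after normalization (lowercasing, stripping punctuation and articles).
print(compute_exact("Steve Smith", "steve smith."))              # 1
# F1 is token-level overlap between the normalized gold and predicted answers.
print(compute_f1("the Eiffel Tower in Paris", "Eiffel Tower"))   # ~0.67 (precision 2/2, recall 2/4)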
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__init__.py ADDED
@@ -0,0 +1,18 @@
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
+ from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
+ from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
+ from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
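A hedged sketch of how the SQuAD processors exported here are typically combined with squad_convert_examples_to_features (the paths, checkpoint and hyper-parameters below are placeholders, not part of this commit):

from transformers import AutoTokenizer
from transformers.data.processors import SquadV2Processor, squad_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
processor = SquadV2Processor()
examples = processor.get_train_examples("./squad_data")  # expects train-v2.0.json in this (hypothetical) directory
features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=True,
    return_dataset="pt",  # also builds a torch TensorDataset alongside the features
)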
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (756 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc ADDED
Binary file (17.8 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc ADDED
Binary file (20 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc ADDED
Binary file (12.2 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc ADDED
Binary file (2.54 kB).
 
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/glue.py ADDED
@@ -0,0 +1,643 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ GLUE processors and helpers"""
17
+
18
+ import os
19
+ import warnings
20
+ from dataclasses import asdict
21
+ from enum import Enum
22
+ from typing import List, Optional, Union
23
+
24
+ from ...tokenization_utils import PreTrainedTokenizer
25
+ from ...utils import is_tf_available, logging
26
+ from .utils import DataProcessor, InputExample, InputFeatures
27
+
28
+
29
+ if is_tf_available():
30
+ import tensorflow as tf
31
+
32
+ logger = logging.get_logger(__name__)
33
+
34
+ DEPRECATION_WARNING = (
35
+ "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
36
+ "library. You can have a look at this example script for pointers: "
37
+ "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
38
+ )
39
+
40
+
41
+ def glue_convert_examples_to_features(
42
+ examples: Union[List[InputExample], "tf.data.Dataset"],
43
+ tokenizer: PreTrainedTokenizer,
44
+ max_length: Optional[int] = None,
45
+ task=None,
46
+ label_list=None,
47
+ output_mode=None,
48
+ ):
49
+ """
50
+ Loads a data file into a list of `InputFeatures`
51
+
52
+ Args:
53
+ examples: List of `InputExamples` or `tf.data.Dataset` containing the examples.
54
+ tokenizer: Instance of a tokenizer that will tokenize the examples
55
+ max_length: Maximum example length. Defaults to the tokenizer's max_len
56
+ task: GLUE task
57
+ label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method
58
+ output_mode: String indicating the output mode. Either `regression` or `classification`
59
+
60
+ Returns:
61
+ If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific
62
+ features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which
63
+ can be fed to the model.
64
+
65
+ """
66
+ warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning)
67
+ if is_tf_available() and isinstance(examples, tf.data.Dataset):
68
+ if task is None:
69
+ raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.")
70
+ return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
71
+ return _glue_convert_examples_to_features(
72
+ examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode
73
+ )
74
+
75
+
76
+ if is_tf_available():
77
+
78
+ def _tf_glue_convert_examples_to_features(
79
+ examples: tf.data.Dataset,
80
+ tokenizer: PreTrainedTokenizer,
81
+ task=str,
82
+ max_length: Optional[int] = None,
83
+ ) -> tf.data.Dataset:
84
+ """
85
+ Returns:
86
+ A `tf.data.Dataset` containing the task-specific features.
87
+
88
+ """
89
+ processor = glue_processors[task]()
90
+ examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples]
91
+ features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task)
92
+ label_type = tf.float32 if task == "sts-b" else tf.int64
93
+
94
+ def gen():
95
+ for ex in features:
96
+ d = {k: v for k, v in asdict(ex).items() if v is not None}
97
+ label = d.pop("label")
98
+ yield (d, label)
99
+
100
+ input_names = tokenizer.model_input_names
101
+
102
+ return tf.data.Dataset.from_generator(
103
+ gen,
104
+ ({k: tf.int32 for k in input_names}, label_type),
105
+ ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
106
+ )
107
+
108
+
109
+ def _glue_convert_examples_to_features(
110
+ examples: List[InputExample],
111
+ tokenizer: PreTrainedTokenizer,
112
+ max_length: Optional[int] = None,
113
+ task=None,
114
+ label_list=None,
115
+ output_mode=None,
116
+ ):
117
+ if max_length is None:
118
+ max_length = tokenizer.model_max_length
119
+
120
+ if task is not None:
121
+ processor = glue_processors[task]()
122
+ if label_list is None:
123
+ label_list = processor.get_labels()
124
+ logger.info(f"Using label list {label_list} for task {task}")
125
+ if output_mode is None:
126
+ output_mode = glue_output_modes[task]
127
+ logger.info(f"Using output mode {output_mode} for task {task}")
128
+
129
+ label_map = {label: i for i, label in enumerate(label_list)}
130
+
131
+ def label_from_example(example: InputExample) -> Union[int, float, None]:
132
+ if example.label is None:
133
+ return None
134
+ if output_mode == "classification":
135
+ return label_map[example.label]
136
+ elif output_mode == "regression":
137
+ return float(example.label)
138
+ raise KeyError(output_mode)
139
+
140
+ labels = [label_from_example(example) for example in examples]
141
+
142
+ batch_encoding = tokenizer(
143
+ [(example.text_a, example.text_b) for example in examples],
144
+ max_length=max_length,
145
+ padding="max_length",
146
+ truncation=True,
147
+ )
148
+
149
+ features = []
150
+ for i in range(len(examples)):
151
+ inputs = {k: batch_encoding[k][i] for k in batch_encoding}
152
+
153
+ feature = InputFeatures(**inputs, label=labels[i])
154
+ features.append(feature)
155
+
156
+ for i, example in enumerate(examples[:5]):
157
+ logger.info("*** Example ***")
158
+ logger.info(f"guid: {example.guid}")
159
+ logger.info(f"features: {features[i]}")
160
+
161
+ return features
162
+
163
+
164
+ class OutputMode(Enum):
165
+ classification = "classification"
166
+ regression = "regression"
167
+
168
+
169
+ class MrpcProcessor(DataProcessor):
170
+ """Processor for the MRPC data set (GLUE version)."""
171
+
172
+ def __init__(self, *args, **kwargs):
173
+ super().__init__(*args, **kwargs)
174
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
175
+
176
+ def get_example_from_tensor_dict(self, tensor_dict):
177
+ """See base class."""
178
+ return InputExample(
179
+ tensor_dict["idx"].numpy(),
180
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
181
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
182
+ str(tensor_dict["label"].numpy()),
183
+ )
184
+
185
+ def get_train_examples(self, data_dir):
186
+ """See base class."""
187
+ logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}")
188
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
189
+
190
+ def get_dev_examples(self, data_dir):
191
+ """See base class."""
192
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
193
+
194
+ def get_test_examples(self, data_dir):
195
+ """See base class."""
196
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
197
+
198
+ def get_labels(self):
199
+ """See base class."""
200
+ return ["0", "1"]
201
+
202
+ def _create_examples(self, lines, set_type):
203
+ """Creates examples for the training, dev and test sets."""
204
+ examples = []
205
+ for i, line in enumerate(lines):
206
+ if i == 0:
207
+ continue
208
+ guid = f"{set_type}-{i}"
209
+ text_a = line[3]
210
+ text_b = line[4]
211
+ label = None if set_type == "test" else line[0]
212
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
213
+ return examples
214
+
215
+
216
+ class MnliProcessor(DataProcessor):
217
+ """Processor for the MultiNLI data set (GLUE version)."""
218
+
219
+ def __init__(self, *args, **kwargs):
220
+ super().__init__(*args, **kwargs)
221
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
222
+
223
+ def get_example_from_tensor_dict(self, tensor_dict):
224
+ """See base class."""
225
+ return InputExample(
226
+ tensor_dict["idx"].numpy(),
227
+ tensor_dict["premise"].numpy().decode("utf-8"),
228
+ tensor_dict["hypothesis"].numpy().decode("utf-8"),
229
+ str(tensor_dict["label"].numpy()),
230
+ )
231
+
232
+ def get_train_examples(self, data_dir):
233
+ """See base class."""
234
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
235
+
236
+ def get_dev_examples(self, data_dir):
237
+ """See base class."""
238
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched")
239
+
240
+ def get_test_examples(self, data_dir):
241
+ """See base class."""
242
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched")
243
+
244
+ def get_labels(self):
245
+ """See base class."""
246
+ return ["contradiction", "entailment", "neutral"]
247
+
248
+ def _create_examples(self, lines, set_type):
249
+ """Creates examples for the training, dev and test sets."""
250
+ examples = []
251
+ for i, line in enumerate(lines):
252
+ if i == 0:
253
+ continue
254
+ guid = f"{set_type}-{line[0]}"
255
+ text_a = line[8]
256
+ text_b = line[9]
257
+ label = None if set_type.startswith("test") else line[-1]
258
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
259
+ return examples
260
+
261
+
262
+ class MnliMismatchedProcessor(MnliProcessor):
263
+ """Processor for the MultiNLI Mismatched data set (GLUE version)."""
264
+
265
+ def __init__(self, *args, **kwargs):
266
+ super().__init__(*args, **kwargs)
267
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
268
+
269
+ def get_dev_examples(self, data_dir):
270
+ """See base class."""
271
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched")
272
+
273
+ def get_test_examples(self, data_dir):
274
+ """See base class."""
275
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched")
276
+
277
+
278
+ class ColaProcessor(DataProcessor):
279
+ """Processor for the CoLA data set (GLUE version)."""
280
+
281
+ def __init__(self, *args, **kwargs):
282
+ super().__init__(*args, **kwargs)
283
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
284
+
285
+ def get_example_from_tensor_dict(self, tensor_dict):
286
+ """See base class."""
287
+ return InputExample(
288
+ tensor_dict["idx"].numpy(),
289
+ tensor_dict["sentence"].numpy().decode("utf-8"),
290
+ None,
291
+ str(tensor_dict["label"].numpy()),
292
+ )
293
+
294
+ def get_train_examples(self, data_dir):
295
+ """See base class."""
296
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
297
+
298
+ def get_dev_examples(self, data_dir):
299
+ """See base class."""
300
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
301
+
302
+ def get_test_examples(self, data_dir):
303
+ """See base class."""
304
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
305
+
306
+ def get_labels(self):
307
+ """See base class."""
308
+ return ["0", "1"]
309
+
310
+ def _create_examples(self, lines, set_type):
311
+ """Creates examples for the training, dev and test sets."""
312
+ test_mode = set_type == "test"
313
+ if test_mode:
314
+ lines = lines[1:]
315
+ text_index = 1 if test_mode else 3
316
+ examples = []
317
+ for i, line in enumerate(lines):
318
+ guid = f"{set_type}-{i}"
319
+ text_a = line[text_index]
320
+ label = None if test_mode else line[1]
321
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
322
+ return examples
323
+
324
+
325
+ class Sst2Processor(DataProcessor):
326
+ """Processor for the SST-2 data set (GLUE version)."""
327
+
328
+ def __init__(self, *args, **kwargs):
329
+ super().__init__(*args, **kwargs)
330
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
331
+
332
+ def get_example_from_tensor_dict(self, tensor_dict):
333
+ """See base class."""
334
+ return InputExample(
335
+ tensor_dict["idx"].numpy(),
336
+ tensor_dict["sentence"].numpy().decode("utf-8"),
337
+ None,
338
+ str(tensor_dict["label"].numpy()),
339
+ )
340
+
341
+ def get_train_examples(self, data_dir):
342
+ """See base class."""
343
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
344
+
345
+ def get_dev_examples(self, data_dir):
346
+ """See base class."""
347
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
348
+
349
+ def get_test_examples(self, data_dir):
350
+ """See base class."""
351
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
352
+
353
+ def get_labels(self):
354
+ """See base class."""
355
+ return ["0", "1"]
356
+
357
+ def _create_examples(self, lines, set_type):
358
+ """Creates examples for the training, dev and test sets."""
359
+ examples = []
360
+ text_index = 1 if set_type == "test" else 0
361
+ for i, line in enumerate(lines):
362
+ if i == 0:
363
+ continue
364
+ guid = f"{set_type}-{i}"
365
+ text_a = line[text_index]
366
+ label = None if set_type == "test" else line[1]
367
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
368
+ return examples
369
+
370
+
371
+ class StsbProcessor(DataProcessor):
372
+ """Processor for the STS-B data set (GLUE version)."""
373
+
374
+ def __init__(self, *args, **kwargs):
375
+ super().__init__(*args, **kwargs)
376
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
377
+
378
+ def get_example_from_tensor_dict(self, tensor_dict):
379
+ """See base class."""
380
+ return InputExample(
381
+ tensor_dict["idx"].numpy(),
382
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
383
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
384
+ str(tensor_dict["label"].numpy()),
385
+ )
386
+
387
+ def get_train_examples(self, data_dir):
388
+ """See base class."""
389
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
390
+
391
+ def get_dev_examples(self, data_dir):
392
+ """See base class."""
393
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
394
+
395
+ def get_test_examples(self, data_dir):
396
+ """See base class."""
397
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
398
+
399
+ def get_labels(self):
400
+ """See base class."""
401
+ return [None]
402
+
403
+ def _create_examples(self, lines, set_type):
404
+ """Creates examples for the training, dev and test sets."""
405
+ examples = []
406
+ for i, line in enumerate(lines):
407
+ if i == 0:
408
+ continue
409
+ guid = f"{set_type}-{line[0]}"
410
+ text_a = line[7]
411
+ text_b = line[8]
412
+ label = None if set_type == "test" else line[-1]
413
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
414
+ return examples
415
+
416
+
417
+ class QqpProcessor(DataProcessor):
418
+ """Processor for the QQP data set (GLUE version)."""
419
+
420
+ def __init__(self, *args, **kwargs):
421
+ super().__init__(*args, **kwargs)
422
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
423
+
424
+ def get_example_from_tensor_dict(self, tensor_dict):
425
+ """See base class."""
426
+ return InputExample(
427
+ tensor_dict["idx"].numpy(),
428
+ tensor_dict["question1"].numpy().decode("utf-8"),
429
+ tensor_dict["question2"].numpy().decode("utf-8"),
430
+ str(tensor_dict["label"].numpy()),
431
+ )
432
+
433
+ def get_train_examples(self, data_dir):
434
+ """See base class."""
435
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
436
+
437
+ def get_dev_examples(self, data_dir):
438
+ """See base class."""
439
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
440
+
441
+ def get_test_examples(self, data_dir):
442
+ """See base class."""
443
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
444
+
445
+ def get_labels(self):
446
+ """See base class."""
447
+ return ["0", "1"]
448
+
449
+ def _create_examples(self, lines, set_type):
450
+ """Creates examples for the training, dev and test sets."""
451
+ test_mode = set_type == "test"
452
+ q1_index = 1 if test_mode else 3
453
+ q2_index = 2 if test_mode else 4
454
+ examples = []
455
+ for i, line in enumerate(lines):
456
+ if i == 0:
457
+ continue
458
+ guid = f"{set_type}-{line[0]}"
459
+ try:
460
+ text_a = line[q1_index]
461
+ text_b = line[q2_index]
462
+ label = None if test_mode else line[5]
463
+ except IndexError:
464
+ continue
465
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
466
+ return examples
467
+
468
+
469
+ class QnliProcessor(DataProcessor):
470
+ """Processor for the QNLI data set (GLUE version)."""
471
+
472
+ def __init__(self, *args, **kwargs):
473
+ super().__init__(*args, **kwargs)
474
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
475
+
476
+ def get_example_from_tensor_dict(self, tensor_dict):
477
+ """See base class."""
478
+ return InputExample(
479
+ tensor_dict["idx"].numpy(),
480
+ tensor_dict["question"].numpy().decode("utf-8"),
481
+ tensor_dict["sentence"].numpy().decode("utf-8"),
482
+ str(tensor_dict["label"].numpy()),
483
+ )
484
+
485
+ def get_train_examples(self, data_dir):
486
+ """See base class."""
487
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
488
+
489
+ def get_dev_examples(self, data_dir):
490
+ """See base class."""
491
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
492
+
493
+ def get_test_examples(self, data_dir):
494
+ """See base class."""
495
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
496
+
497
+ def get_labels(self):
498
+ """See base class."""
499
+ return ["entailment", "not_entailment"]
500
+
501
+ def _create_examples(self, lines, set_type):
502
+ """Creates examples for the training, dev and test sets."""
503
+ examples = []
504
+ for i, line in enumerate(lines):
505
+ if i == 0:
506
+ continue
507
+ guid = f"{set_type}-{line[0]}"
508
+ text_a = line[1]
509
+ text_b = line[2]
510
+ label = None if set_type == "test" else line[-1]
511
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
512
+ return examples
513
+
514
+
515
+ class RteProcessor(DataProcessor):
516
+ """Processor for the RTE data set (GLUE version)."""
517
+
518
+ def __init__(self, *args, **kwargs):
519
+ super().__init__(*args, **kwargs)
520
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
521
+
522
+ def get_example_from_tensor_dict(self, tensor_dict):
523
+ """See base class."""
524
+ return InputExample(
525
+ tensor_dict["idx"].numpy(),
526
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
527
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
528
+ str(tensor_dict["label"].numpy()),
529
+ )
530
+
531
+ def get_train_examples(self, data_dir):
532
+ """See base class."""
533
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
534
+
535
+ def get_dev_examples(self, data_dir):
536
+ """See base class."""
537
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
538
+
539
+ def get_test_examples(self, data_dir):
540
+ """See base class."""
541
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
542
+
543
+ def get_labels(self):
544
+ """See base class."""
545
+ return ["entailment", "not_entailment"]
546
+
547
+ def _create_examples(self, lines, set_type):
548
+ """Creates examples for the training, dev and test sets."""
549
+ examples = []
550
+ for i, line in enumerate(lines):
551
+ if i == 0:
552
+ continue
553
+ guid = f"{set_type}-{line[0]}"
554
+ text_a = line[1]
555
+ text_b = line[2]
556
+ label = None if set_type == "test" else line[-1]
557
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
558
+ return examples
559
+
560
+
561
+ class WnliProcessor(DataProcessor):
562
+ """Processor for the WNLI data set (GLUE version)."""
563
+
564
+ def __init__(self, *args, **kwargs):
565
+ super().__init__(*args, **kwargs)
566
+ warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning)
567
+
568
+ def get_example_from_tensor_dict(self, tensor_dict):
569
+ """See base class."""
570
+ return InputExample(
571
+ tensor_dict["idx"].numpy(),
572
+ tensor_dict["sentence1"].numpy().decode("utf-8"),
573
+ tensor_dict["sentence2"].numpy().decode("utf-8"),
574
+ str(tensor_dict["label"].numpy()),
575
+ )
576
+
577
+ def get_train_examples(self, data_dir):
578
+ """See base class."""
579
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
580
+
581
+ def get_dev_examples(self, data_dir):
582
+ """See base class."""
583
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
584
+
585
+ def get_test_examples(self, data_dir):
586
+ """See base class."""
587
+ return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
588
+
589
+ def get_labels(self):
590
+ """See base class."""
591
+ return ["0", "1"]
592
+
593
+ def _create_examples(self, lines, set_type):
594
+ """Creates examples for the training, dev and test sets."""
595
+ examples = []
596
+ for i, line in enumerate(lines):
597
+ if i == 0:
598
+ continue
599
+ guid = f"{set_type}-{line[0]}"
600
+ text_a = line[1]
601
+ text_b = line[2]
602
+ label = None if set_type == "test" else line[-1]
603
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
604
+ return examples
605
+
606
+
607
+ glue_tasks_num_labels = {
608
+ "cola": 2,
609
+ "mnli": 3,
610
+ "mrpc": 2,
611
+ "sst-2": 2,
612
+ "sts-b": 1,
613
+ "qqp": 2,
614
+ "qnli": 2,
615
+ "rte": 2,
616
+ "wnli": 2,
617
+ }
618
+
619
+ glue_processors = {
620
+ "cola": ColaProcessor,
621
+ "mnli": MnliProcessor,
622
+ "mnli-mm": MnliMismatchedProcessor,
623
+ "mrpc": MrpcProcessor,
624
+ "sst-2": Sst2Processor,
625
+ "sts-b": StsbProcessor,
626
+ "qqp": QqpProcessor,
627
+ "qnli": QnliProcessor,
628
+ "rte": RteProcessor,
629
+ "wnli": WnliProcessor,
630
+ }
631
+
632
+ glue_output_modes = {
633
+ "cola": "classification",
634
+ "mnli": "classification",
635
+ "mnli-mm": "classification",
636
+ "mrpc": "classification",
637
+ "sst-2": "classification",
638
+ "sts-b": "regression",
639
+ "qqp": "classification",
640
+ "qnli": "classification",
641
+ "rte": "classification",
642
+ "wnli": "classification",
643
+ }
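
For orientation, a minimal, hedged sketch of how the registries above (`glue_processors`, `glue_output_modes`) are typically consumed; the `glue_data/MRPC` directory is an illustrative assumption and must contain the original GLUE `train.tsv`/`dev.tsv` files:

```python
# Hedged usage sketch: look up a processor from the registries defined above.
# The "glue_data/MRPC" path is an assumption; point it at your local GLUE download.
from transformers.data.processors.glue import glue_output_modes, glue_processors

task = "mrpc"
processor = glue_processors[task]()     # instantiating emits the FutureWarning shown above
output_mode = glue_output_modes[task]   # "classification" for MRPC

label_list = processor.get_labels()     # ["0", "1"] for MRPC
train_examples = processor.get_train_examples("glue_data/MRPC")
print(len(train_examples), label_list, output_mode)
```

Note that every processor constructor in this file issues a `FutureWarning`, since this processor API is deprecated in favor of the Hugging Face datasets library.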
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/squad.py ADDED
@@ -0,0 +1,845 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import json
16
+ import os
17
+ from functools import partial
18
+ from multiprocessing import Pool, cpu_count
19
+
20
+ import numpy as np
21
+ from tqdm import tqdm
22
+
23
+ from ...models.bert.tokenization_bert import whitespace_tokenize
24
+ from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy
25
+ from ...utils import is_tf_available, is_torch_available, logging
26
+ from .utils import DataProcessor
27
+
28
+
29
+ # Store the tokenizers which insert 2 separator tokens
30
+ MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"}
31
+
32
+
33
+ if is_torch_available():
34
+ import torch
35
+ from torch.utils.data import TensorDataset
36
+
37
+ if is_tf_available():
38
+ import tensorflow as tf
39
+
40
+ logger = logging.get_logger(__name__)
41
+
42
+
43
+ def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
44
+ """Returns tokenized answer spans that better match the annotated answer."""
45
+ tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
46
+
47
+ for new_start in range(input_start, input_end + 1):
48
+ for new_end in range(input_end, new_start - 1, -1):
49
+ text_span = " ".join(doc_tokens[new_start : (new_end + 1)])
50
+ if text_span == tok_answer_text:
51
+ return (new_start, new_end)
52
+
53
+ return (input_start, input_end)
54
+
55
+
56
+ def _check_is_max_context(doc_spans, cur_span_index, position):
57
+ """Check if this is the 'max context' doc span for the token."""
58
+ best_score = None
59
+ best_span_index = None
60
+ for span_index, doc_span in enumerate(doc_spans):
61
+ end = doc_span.start + doc_span.length - 1
62
+ if position < doc_span.start:
63
+ continue
64
+ if position > end:
65
+ continue
66
+ num_left_context = position - doc_span.start
67
+ num_right_context = end - position
68
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
69
+ if best_score is None or score > best_score:
70
+ best_score = score
71
+ best_span_index = span_index
72
+
73
+ return cur_span_index == best_span_index
74
+
75
+
76
+ def _new_check_is_max_context(doc_spans, cur_span_index, position):
77
+ """Check if this is the 'max context' doc span for the token."""
78
+ # if len(doc_spans) == 1:
79
+ # return True
80
+ best_score = None
81
+ best_span_index = None
82
+ for span_index, doc_span in enumerate(doc_spans):
83
+ end = doc_span["start"] + doc_span["length"] - 1
84
+ if position < doc_span["start"]:
85
+ continue
86
+ if position > end:
87
+ continue
88
+ num_left_context = position - doc_span["start"]
89
+ num_right_context = end - position
90
+ score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
91
+ if best_score is None or score > best_score:
92
+ best_score = score
93
+ best_span_index = span_index
94
+
95
+ return cur_span_index == best_span_index
96
+
97
+
98
+ def _is_whitespace(c):
99
+ if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
100
+ return True
101
+ return False
102
+
103
+
104
+ def squad_convert_example_to_features(
105
+ example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training
106
+ ):
107
+ features = []
108
+ if is_training and not example.is_impossible:
109
+ # Get start and end position
110
+ start_position = example.start_position
111
+ end_position = example.end_position
112
+
113
+ # If the answer cannot be found in the text, then skip this example.
114
+ actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)])
115
+ cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text))
116
+ if actual_text.find(cleaned_answer_text) == -1:
117
+ logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'")
118
+ return []
119
+
120
+ tok_to_orig_index = []
121
+ orig_to_tok_index = []
122
+ all_doc_tokens = []
123
+ for i, token in enumerate(example.doc_tokens):
124
+ orig_to_tok_index.append(len(all_doc_tokens))
125
+ if tokenizer.__class__.__name__ in [
126
+ "RobertaTokenizer",
127
+ "LongformerTokenizer",
128
+ "BartTokenizer",
129
+ "RobertaTokenizerFast",
130
+ "LongformerTokenizerFast",
131
+ "BartTokenizerFast",
132
+ ]:
133
+ sub_tokens = tokenizer.tokenize(token, add_prefix_space=True)
134
+ else:
135
+ sub_tokens = tokenizer.tokenize(token)
136
+ for sub_token in sub_tokens:
137
+ tok_to_orig_index.append(i)
138
+ all_doc_tokens.append(sub_token)
139
+
140
+ if is_training and not example.is_impossible:
141
+ tok_start_position = orig_to_tok_index[example.start_position]
142
+ if example.end_position < len(example.doc_tokens) - 1:
143
+ tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
144
+ else:
145
+ tok_end_position = len(all_doc_tokens) - 1
146
+
147
+ (tok_start_position, tok_end_position) = _improve_answer_span(
148
+ all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text
149
+ )
150
+
151
+ spans = []
152
+
153
+ truncated_query = tokenizer.encode(
154
+ example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length
155
+ )
156
+
157
+ # Tokenizers that insert 2 SEP tokens in-between <context> & <question> need to have special handling
158
+ # in the way they compute mask of added tokens.
159
+ tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower()
160
+ sequence_added_tokens = (
161
+ tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1
162
+ if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET
163
+ else tokenizer.model_max_length - tokenizer.max_len_single_sentence
164
+ )
165
+ sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair
166
+
167
+ span_doc_tokens = all_doc_tokens
168
+ while len(spans) * doc_stride < len(all_doc_tokens):
169
+ # Define the side we want to truncate / pad and the text/pair sorting
170
+ if tokenizer.padding_side == "right":
171
+ texts = truncated_query
172
+ pairs = span_doc_tokens
173
+ truncation = TruncationStrategy.ONLY_SECOND.value
174
+ else:
175
+ texts = span_doc_tokens
176
+ pairs = truncated_query
177
+ truncation = TruncationStrategy.ONLY_FIRST.value
178
+
179
+ encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic
180
+ texts,
181
+ pairs,
182
+ truncation=truncation,
183
+ padding=padding_strategy,
184
+ max_length=max_seq_length,
185
+ return_overflowing_tokens=True,
186
+ stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens,
187
+ return_token_type_ids=True,
188
+ )
189
+
190
+ paragraph_len = min(
191
+ len(all_doc_tokens) - len(spans) * doc_stride,
192
+ max_seq_length - len(truncated_query) - sequence_pair_added_tokens,
193
+ )
194
+
195
+ if tokenizer.pad_token_id in encoded_dict["input_ids"]:
196
+ if tokenizer.padding_side == "right":
197
+ non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)]
198
+ else:
199
+ last_padding_id_position = (
200
+ len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id)
201
+ )
202
+ non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :]
203
+
204
+ else:
205
+ non_padded_ids = encoded_dict["input_ids"]
206
+
207
+ tokens = tokenizer.convert_ids_to_tokens(non_padded_ids)
208
+
209
+ token_to_orig_map = {}
210
+ for i in range(paragraph_len):
211
+ index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i
212
+ token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i]
213
+
214
+ encoded_dict["paragraph_len"] = paragraph_len
215
+ encoded_dict["tokens"] = tokens
216
+ encoded_dict["token_to_orig_map"] = token_to_orig_map
217
+ encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens
218
+ encoded_dict["token_is_max_context"] = {}
219
+ encoded_dict["start"] = len(spans) * doc_stride
220
+ encoded_dict["length"] = paragraph_len
221
+
222
+ spans.append(encoded_dict)
223
+
224
+ if "overflowing_tokens" not in encoded_dict or (
225
+ "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0
226
+ ):
227
+ break
228
+ span_doc_tokens = encoded_dict["overflowing_tokens"]
229
+
230
+ for doc_span_index in range(len(spans)):
231
+ for j in range(spans[doc_span_index]["paragraph_len"]):
232
+ is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j)
233
+ index = (
234
+ j
235
+ if tokenizer.padding_side == "left"
236
+ else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j
237
+ )
238
+ spans[doc_span_index]["token_is_max_context"][index] = is_max_context
239
+
240
+ for span in spans:
241
+ # Identify the position of the CLS token
242
+ cls_index = span["input_ids"].index(tokenizer.cls_token_id)
243
+
244
+ # p_mask: mask with 1 for tokens that cannot be in the answer (0 for tokens which can be in an answer)
245
+ # Original TF implementation also keeps the classification token (set to 0)
246
+ p_mask = np.ones_like(span["token_type_ids"])
247
+ if tokenizer.padding_side == "right":
248
+ p_mask[len(truncated_query) + sequence_added_tokens :] = 0
249
+ else:
250
+ p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0
251
+
252
+ pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id)
253
+ special_token_indices = np.asarray(
254
+ tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True)
255
+ ).nonzero()
256
+
257
+ p_mask[pad_token_indices] = 1
258
+ p_mask[special_token_indices] = 1
259
+
260
+ # Set the cls index to 0: the CLS index can be used for impossible answers
261
+ p_mask[cls_index] = 0
262
+
263
+ span_is_impossible = example.is_impossible
264
+ start_position = 0
265
+ end_position = 0
266
+ if is_training and not span_is_impossible:
267
+ # For training, if our document chunk does not contain an annotation
268
+ # we throw it out, since there is nothing to predict.
269
+ doc_start = span["start"]
270
+ doc_end = span["start"] + span["length"] - 1
271
+ out_of_span = False
272
+
273
+ if not (tok_start_position >= doc_start and tok_end_position <= doc_end):
274
+ out_of_span = True
275
+
276
+ if out_of_span:
277
+ start_position = cls_index
278
+ end_position = cls_index
279
+ span_is_impossible = True
280
+ else:
281
+ if tokenizer.padding_side == "left":
282
+ doc_offset = 0
283
+ else:
284
+ doc_offset = len(truncated_query) + sequence_added_tokens
285
+
286
+ start_position = tok_start_position - doc_start + doc_offset
287
+ end_position = tok_end_position - doc_start + doc_offset
288
+
289
+ features.append(
290
+ SquadFeatures(
291
+ span["input_ids"],
292
+ span["attention_mask"],
293
+ span["token_type_ids"],
294
+ cls_index,
295
+ p_mask.tolist(),
296
+ example_index=0, # Cannot set unique_id and example_index here. They will be set after the multiprocessing step.
297
+ unique_id=0,
298
+ paragraph_len=span["paragraph_len"],
299
+ token_is_max_context=span["token_is_max_context"],
300
+ tokens=span["tokens"],
301
+ token_to_orig_map=span["token_to_orig_map"],
302
+ start_position=start_position,
303
+ end_position=end_position,
304
+ is_impossible=span_is_impossible,
305
+ qas_id=example.qas_id,
306
+ )
307
+ )
308
+ return features
309
+
310
+
311
+ def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase):
312
+ global tokenizer
313
+ tokenizer = tokenizer_for_convert
314
+
315
+
316
+ def squad_convert_examples_to_features(
317
+ examples,
318
+ tokenizer,
319
+ max_seq_length,
320
+ doc_stride,
321
+ max_query_length,
322
+ is_training,
323
+ padding_strategy="max_length",
324
+ return_dataset=False,
325
+ threads=1,
326
+ tqdm_enabled=True,
327
+ ):
328
+ """
329
+ Converts a list of examples into a list of features that can be directly given as input to a model. It is
330
+ model-dependent and takes advantage of many of the tokenizer's features to create the model's inputs.
331
+
332
+ Args:
333
+ examples: list of [`~data.processors.squad.SquadExample`]
334
+ tokenizer: an instance of a child of [`PreTrainedTokenizer`]
335
+ max_seq_length: The maximum sequence length of the inputs.
336
+ doc_stride: The stride used when the context is too large and is split across several features.
337
+ max_query_length: The maximum length of the query.
338
+ is_training: whether to create features for model evaluation or model training.
339
+ padding_strategy: Defaults to "max_length". The padding strategy to use.
340
+ return_dataset: Defaults to False. Either 'pt' or 'tf'.
341
+ if 'pt': returns a torch.utils.data.TensorDataset, if 'tf': returns a tf.data.Dataset
342
+ threads: number of worker processes used to convert the examples in parallel.
343
+
344
+
345
+ Returns:
346
+ list of [`~data.processors.squad.SquadFeatures`]
347
+
348
+ Example:
349
+
350
+ ```python
351
+ processor = SquadV2Processor()
352
+ examples = processor.get_dev_examples(data_dir)
353
+
354
+ features = squad_convert_examples_to_features(
355
+ examples=examples,
356
+ tokenizer=tokenizer,
357
+ max_seq_length=args.max_seq_length,
358
+ doc_stride=args.doc_stride,
359
+ max_query_length=args.max_query_length,
360
+ is_training=not evaluate,
361
+ )
362
+ ```"""
363
+ # Defining helper methods
364
+ features = []
365
+
366
+ threads = min(threads, cpu_count())
367
+ with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p:
368
+ annotate_ = partial(
369
+ squad_convert_example_to_features,
370
+ max_seq_length=max_seq_length,
371
+ doc_stride=doc_stride,
372
+ max_query_length=max_query_length,
373
+ padding_strategy=padding_strategy,
374
+ is_training=is_training,
375
+ )
376
+ features = list(
377
+ tqdm(
378
+ p.imap(annotate_, examples, chunksize=32),
379
+ total=len(examples),
380
+ desc="convert squad examples to features",
381
+ disable=not tqdm_enabled,
382
+ )
383
+ )
384
+
385
+ new_features = []
386
+ unique_id = 1000000000
387
+ example_index = 0
388
+ for example_features in tqdm(
389
+ features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled
390
+ ):
391
+ if not example_features:
392
+ continue
393
+ for example_feature in example_features:
394
+ example_feature.example_index = example_index
395
+ example_feature.unique_id = unique_id
396
+ new_features.append(example_feature)
397
+ unique_id += 1
398
+ example_index += 1
399
+ features = new_features
400
+ del new_features
401
+ if return_dataset == "pt":
402
+ if not is_torch_available():
403
+ raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.")
404
+
405
+ # Convert to Tensors and build dataset
406
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
407
+ all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
408
+ all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
409
+ all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long)
410
+ all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float)
411
+ all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float)
412
+
413
+ if not is_training:
414
+ all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long)
415
+ dataset = TensorDataset(
416
+ all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask
417
+ )
418
+ else:
419
+ all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long)
420
+ all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long)
421
+ dataset = TensorDataset(
422
+ all_input_ids,
423
+ all_attention_masks,
424
+ all_token_type_ids,
425
+ all_start_positions,
426
+ all_end_positions,
427
+ all_cls_index,
428
+ all_p_mask,
429
+ all_is_impossible,
430
+ )
431
+
432
+ return features, dataset
433
+ elif return_dataset == "tf":
434
+ if not is_tf_available():
435
+ raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.")
436
+
437
+ def gen():
438
+ for i, ex in enumerate(features):
439
+ if ex.token_type_ids is None:
440
+ yield (
441
+ {
442
+ "input_ids": ex.input_ids,
443
+ "attention_mask": ex.attention_mask,
444
+ "feature_index": i,
445
+ "qas_id": ex.qas_id,
446
+ },
447
+ {
448
+ "start_positions": ex.start_position,
449
+ "end_positions": ex.end_position,
450
+ "cls_index": ex.cls_index,
451
+ "p_mask": ex.p_mask,
452
+ "is_impossible": ex.is_impossible,
453
+ },
454
+ )
455
+ else:
456
+ yield (
457
+ {
458
+ "input_ids": ex.input_ids,
459
+ "attention_mask": ex.attention_mask,
460
+ "token_type_ids": ex.token_type_ids,
461
+ "feature_index": i,
462
+ "qas_id": ex.qas_id,
463
+ },
464
+ {
465
+ "start_positions": ex.start_position,
466
+ "end_positions": ex.end_position,
467
+ "cls_index": ex.cls_index,
468
+ "p_mask": ex.p_mask,
469
+ "is_impossible": ex.is_impossible,
470
+ },
471
+ )
472
+
473
+ # Why have we split the batch into a tuple? PyTorch just has a list of tensors.
474
+ if "token_type_ids" in tokenizer.model_input_names:
475
+ train_types = (
476
+ {
477
+ "input_ids": tf.int32,
478
+ "attention_mask": tf.int32,
479
+ "token_type_ids": tf.int32,
480
+ "feature_index": tf.int64,
481
+ "qas_id": tf.string,
482
+ },
483
+ {
484
+ "start_positions": tf.int64,
485
+ "end_positions": tf.int64,
486
+ "cls_index": tf.int64,
487
+ "p_mask": tf.int32,
488
+ "is_impossible": tf.int32,
489
+ },
490
+ )
491
+
492
+ train_shapes = (
493
+ {
494
+ "input_ids": tf.TensorShape([None]),
495
+ "attention_mask": tf.TensorShape([None]),
496
+ "token_type_ids": tf.TensorShape([None]),
497
+ "feature_index": tf.TensorShape([]),
498
+ "qas_id": tf.TensorShape([]),
499
+ },
500
+ {
501
+ "start_positions": tf.TensorShape([]),
502
+ "end_positions": tf.TensorShape([]),
503
+ "cls_index": tf.TensorShape([]),
504
+ "p_mask": tf.TensorShape([None]),
505
+ "is_impossible": tf.TensorShape([]),
506
+ },
507
+ )
508
+ else:
509
+ train_types = (
510
+ {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string},
511
+ {
512
+ "start_positions": tf.int64,
513
+ "end_positions": tf.int64,
514
+ "cls_index": tf.int64,
515
+ "p_mask": tf.int32,
516
+ "is_impossible": tf.int32,
517
+ },
518
+ )
519
+
520
+ train_shapes = (
521
+ {
522
+ "input_ids": tf.TensorShape([None]),
523
+ "attention_mask": tf.TensorShape([None]),
524
+ "feature_index": tf.TensorShape([]),
525
+ "qas_id": tf.TensorShape([]),
526
+ },
527
+ {
528
+ "start_positions": tf.TensorShape([]),
529
+ "end_positions": tf.TensorShape([]),
530
+ "cls_index": tf.TensorShape([]),
531
+ "p_mask": tf.TensorShape([None]),
532
+ "is_impossible": tf.TensorShape([]),
533
+ },
534
+ )
535
+
536
+ return tf.data.Dataset.from_generator(gen, train_types, train_shapes)
537
+ else:
538
+ return features
539
+
540
+
541
+ class SquadProcessor(DataProcessor):
542
+ """
543
+ Processor for the SQuAD data set. Overridden by SquadV1Processor and SquadV2Processor, used for version 1.1 and
544
+ version 2.0 of SQuAD, respectively.
545
+ """
546
+
547
+ train_file = None
548
+ dev_file = None
549
+
550
+ def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False):
551
+ if not evaluate:
552
+ answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8")
553
+ answer_start = tensor_dict["answers"]["answer_start"][0].numpy()
554
+ answers = []
555
+ else:
556
+ answers = [
557
+ {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")}
558
+ for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"])
559
+ ]
560
+
561
+ answer = None
562
+ answer_start = None
563
+
564
+ return SquadExample(
565
+ qas_id=tensor_dict["id"].numpy().decode("utf-8"),
566
+ question_text=tensor_dict["question"].numpy().decode("utf-8"),
567
+ context_text=tensor_dict["context"].numpy().decode("utf-8"),
568
+ answer_text=answer,
569
+ start_position_character=answer_start,
570
+ title=tensor_dict["title"].numpy().decode("utf-8"),
571
+ answers=answers,
572
+ )
573
+
574
+ def get_examples_from_dataset(self, dataset, evaluate=False):
575
+ """
576
+ Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset.
577
+
578
+ Args:
579
+ dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")*
580
+ evaluate: Boolean specifying if in evaluation mode or in training mode
581
+
582
+ Returns:
583
+ List of SquadExample
584
+
585
+ Examples:
586
+
587
+ ```python
588
+ >>> import tensorflow_datasets as tfds
589
+
590
+ >>> dataset = tfds.load("squad")
591
+
592
+ >>> training_examples = get_examples_from_dataset(dataset, evaluate=False)
593
+ >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True)
594
+ ```"""
595
+
596
+ if evaluate:
597
+ dataset = dataset["validation"]
598
+ else:
599
+ dataset = dataset["train"]
600
+
601
+ examples = []
602
+ for tensor_dict in tqdm(dataset):
603
+ examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate))
604
+
605
+ return examples
606
+
607
+ def get_train_examples(self, data_dir, filename=None):
608
+ """
609
+ Returns the training examples from the data directory.
610
+
611
+ Args:
612
+ data_dir: Directory containing the data files used for training and evaluating.
613
+ filename: None by default, specify this if the training file has a different name than the original one
614
+ which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
615
+
616
+ """
617
+ if data_dir is None:
618
+ data_dir = ""
619
+
620
+ if self.train_file is None:
621
+ raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
622
+
623
+ with open(
624
+ os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
625
+ ) as reader:
626
+ input_data = json.load(reader)["data"]
627
+ return self._create_examples(input_data, "train")
628
+
629
+ def get_dev_examples(self, data_dir, filename=None):
630
+ """
631
+ Returns the evaluation examples from the data directory.
632
+
633
+ Args:
634
+ data_dir: Directory containing the data files used for training and evaluating.
635
+ filename: None by default, specify this if the evaluation file has a different name than the original one
636
+ which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively.
637
+ """
638
+ if data_dir is None:
639
+ data_dir = ""
640
+
641
+ if self.dev_file is None:
642
+ raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")
643
+
644
+ with open(
645
+ os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
646
+ ) as reader:
647
+ input_data = json.load(reader)["data"]
648
+ return self._create_examples(input_data, "dev")
649
+
650
+ def _create_examples(self, input_data, set_type):
651
+ is_training = set_type == "train"
652
+ examples = []
653
+ for entry in tqdm(input_data):
654
+ title = entry["title"]
655
+ for paragraph in entry["paragraphs"]:
656
+ context_text = paragraph["context"]
657
+ for qa in paragraph["qas"]:
658
+ qas_id = qa["id"]
659
+ question_text = qa["question"]
660
+ start_position_character = None
661
+ answer_text = None
662
+ answers = []
663
+
664
+ is_impossible = qa.get("is_impossible", False)
665
+ if not is_impossible:
666
+ if is_training:
667
+ answer = qa["answers"][0]
668
+ answer_text = answer["text"]
669
+ start_position_character = answer["answer_start"]
670
+ else:
671
+ answers = qa["answers"]
672
+
673
+ example = SquadExample(
674
+ qas_id=qas_id,
675
+ question_text=question_text,
676
+ context_text=context_text,
677
+ answer_text=answer_text,
678
+ start_position_character=start_position_character,
679
+ title=title,
680
+ is_impossible=is_impossible,
681
+ answers=answers,
682
+ )
683
+ examples.append(example)
684
+ return examples
685
+
686
+
687
+ class SquadV1Processor(SquadProcessor):
688
+ train_file = "train-v1.1.json"
689
+ dev_file = "dev-v1.1.json"
690
+
691
+
692
+ class SquadV2Processor(SquadProcessor):
693
+ train_file = "train-v2.0.json"
694
+ dev_file = "dev-v2.0.json"
695
+
696
+
697
+ class SquadExample:
698
+ """
699
+ A single training/test example for the Squad dataset, as loaded from disk.
700
+
701
+ Args:
702
+ qas_id: The example's unique identifier
703
+ question_text: The question string
704
+ context_text: The context string
705
+ answer_text: The answer string
706
+ start_position_character: The character position of the start of the answer
707
+ title: The title of the example
708
+ answers: None by default, this is used during evaluation. Holds answers as well as their start positions.
709
+ is_impossible: False by default, set to True if the example has no possible answer.
710
+ """
711
+
712
+ def __init__(
713
+ self,
714
+ qas_id,
715
+ question_text,
716
+ context_text,
717
+ answer_text,
718
+ start_position_character,
719
+ title,
720
+ answers=[],
721
+ is_impossible=False,
722
+ ):
723
+ self.qas_id = qas_id
724
+ self.question_text = question_text
725
+ self.context_text = context_text
726
+ self.answer_text = answer_text
727
+ self.title = title
728
+ self.is_impossible = is_impossible
729
+ self.answers = answers
730
+
731
+ self.start_position, self.end_position = 0, 0
732
+
733
+ doc_tokens = []
734
+ char_to_word_offset = []
735
+ prev_is_whitespace = True
736
+
737
+ # Split on whitespace so that different tokens may be attributed to their original position.
738
+ for c in self.context_text:
739
+ if _is_whitespace(c):
740
+ prev_is_whitespace = True
741
+ else:
742
+ if prev_is_whitespace:
743
+ doc_tokens.append(c)
744
+ else:
745
+ doc_tokens[-1] += c
746
+ prev_is_whitespace = False
747
+ char_to_word_offset.append(len(doc_tokens) - 1)
748
+
749
+ self.doc_tokens = doc_tokens
750
+ self.char_to_word_offset = char_to_word_offset
751
+
752
+ # Start and end positions only have a value during evaluation.
753
+ if start_position_character is not None and not is_impossible:
754
+ self.start_position = char_to_word_offset[start_position_character]
755
+ self.end_position = char_to_word_offset[
756
+ min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1)
757
+ ]
758
+
759
+
760
+ class SquadFeatures:
761
+ """
762
+ Single squad example features to be fed to a model. Those features are model-specific and can be crafted from
763
+ [`~data.processors.squad.SquadExample`] using the
764
+ [`~transformers.data.processors.squad.squad_convert_examples_to_features`] method.
765
+
766
+ Args:
767
+ input_ids: Indices of input sequence tokens in the vocabulary.
768
+ attention_mask: Mask to avoid performing attention on padding token indices.
769
+ token_type_ids: Segment token indices to indicate first and second portions of the inputs.
770
+ cls_index: the index of the CLS token.
771
+ p_mask: Mask identifying tokens that can be answers vs. tokens that cannot.
772
+ Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer.
773
+ example_index: the index of the example
774
+ unique_id: The unique Feature identifier
775
+ paragraph_len: The length of the context
776
+ token_is_max_context:
777
+ List of booleans identifying which tokens have their maximum context in this feature object. If a token
778
+ does not have its maximum context in this feature object, it means that another feature object has more
779
+ information related to that token and should be prioritized over this feature for that token.
780
+ tokens: list of tokens corresponding to the input ids
781
+ token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer.
782
+ start_position: start of the answer token index
783
+ end_position: end of the answer token index
784
+ encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods.
785
+ """
786
+
787
+ def __init__(
788
+ self,
789
+ input_ids,
790
+ attention_mask,
791
+ token_type_ids,
792
+ cls_index,
793
+ p_mask,
794
+ example_index,
795
+ unique_id,
796
+ paragraph_len,
797
+ token_is_max_context,
798
+ tokens,
799
+ token_to_orig_map,
800
+ start_position,
801
+ end_position,
802
+ is_impossible,
803
+ qas_id: str = None,
804
+ encoding: BatchEncoding = None,
805
+ ):
806
+ self.input_ids = input_ids
807
+ self.attention_mask = attention_mask
808
+ self.token_type_ids = token_type_ids
809
+ self.cls_index = cls_index
810
+ self.p_mask = p_mask
811
+
812
+ self.example_index = example_index
813
+ self.unique_id = unique_id
814
+ self.paragraph_len = paragraph_len
815
+ self.token_is_max_context = token_is_max_context
816
+ self.tokens = tokens
817
+ self.token_to_orig_map = token_to_orig_map
818
+
819
+ self.start_position = start_position
820
+ self.end_position = end_position
821
+ self.is_impossible = is_impossible
822
+ self.qas_id = qas_id
823
+
824
+ self.encoding = encoding
825
+
826
+
827
+ class SquadResult:
828
+ """
829
+ Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset.
830
+
831
+ Args:
832
+ unique_id: The unique identifier corresponding to that example.
833
+ start_logits: The logits corresponding to the start of the answer
834
+ end_logits: The logits corresponding to the end of the answer
835
+ """
836
+
837
+ def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None):
838
+ self.start_logits = start_logits
839
+ self.end_logits = end_logits
840
+ self.unique_id = unique_id
841
+
842
+ if start_top_index:
843
+ self.start_top_index = start_top_index
844
+ self.end_top_index = end_top_index
845
+ self.cls_logits = cls_logits
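
As a rough usage sketch (not part of the diff above), the processor and conversion function defined in this file are typically combined as follows; the `bert-base-uncased` checkpoint and the `squad_data` directory are illustrative assumptions, and PyTorch must be installed for `return_dataset="pt"`:

```python
# Hedged sketch: load SQuAD v2 dev examples and convert them to model features.
# The tokenizer checkpoint and the "squad_data" directory (containing dev-v2.0.json)
# are illustrative assumptions.
from transformers import AutoTokenizer
from transformers.data.processors.squad import (
    SquadV2Processor,
    squad_convert_examples_to_features,
)

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", use_fast=False)
processor = SquadV2Processor()
examples = processor.get_dev_examples("squad_data")  # expects dev-v2.0.json inside

features, dataset = squad_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_seq_length=384,
    doc_stride=128,
    max_query_length=64,
    is_training=False,
    return_dataset="pt",  # also accepts "tf" or False (features only)
)
print(len(features), len(dataset))
```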
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/utils.py ADDED
@@ -0,0 +1,349 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import csv
18
+ import dataclasses
19
+ import json
20
+ from dataclasses import dataclass
21
+ from typing import List, Optional, Union
22
+
23
+ from ...utils import is_tf_available, is_torch_available, logging
24
+
25
+
26
+ logger = logging.get_logger(__name__)
27
+
28
+
29
+ @dataclass
30
+ class InputExample:
31
+ """
32
+ A single training/test example for simple sequence classification.
33
+
34
+ Args:
35
+ guid: Unique id for the example.
36
+ text_a: string. The untokenized text of the first sequence. For single
37
+ sequence tasks, only this sequence must be specified.
38
+ text_b: (Optional) string. The untokenized text of the second sequence.
39
+ Only must be specified for sequence pair tasks.
40
+ label: (Optional) string. The label of the example. This should be
41
+ specified for train and dev examples, but not for test examples.
42
+ """
43
+
44
+ guid: str
45
+ text_a: str
46
+ text_b: Optional[str] = None
47
+ label: Optional[str] = None
48
+
49
+ def to_json_string(self):
50
+ """Serializes this instance to a JSON string."""
51
+ return json.dumps(dataclasses.asdict(self), indent=2) + "\n"
52
+
53
+
54
+ @dataclass(frozen=True)
55
+ class InputFeatures:
56
+ """
57
+ A single set of features of data. Property names are the same names as the corresponding inputs to a model.
58
+
59
+ Args:
60
+ input_ids: Indices of input sequence tokens in the vocabulary.
61
+ attention_mask: Mask to avoid performing attention on padding token indices.
62
+ Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded)
63
+ tokens.
64
+ token_type_ids: (Optional) Segment token indices to indicate first and second
65
+ portions of the inputs. Only some models use them.
66
+ label: (Optional) Label corresponding to the input. Int for classification problems,
67
+ float for regression problems.
68
+ """
69
+
70
+ input_ids: List[int]
71
+ attention_mask: Optional[List[int]] = None
72
+ token_type_ids: Optional[List[int]] = None
73
+ label: Optional[Union[int, float]] = None
74
+
75
+ def to_json_string(self):
76
+ """Serializes this instance to a JSON string."""
77
+ return json.dumps(dataclasses.asdict(self)) + "\n"
78
+
79
+
80
+ class DataProcessor:
81
+ """Base class for data converters for sequence classification data sets."""
82
+
83
+ def get_example_from_tensor_dict(self, tensor_dict):
84
+ """
85
+ Gets an example from a dict with tensorflow tensors.
86
+
87
+ Args:
88
+ tensor_dict: Keys and values should match the corresponding GLUE
89
+ tensorflow_dataset examples.
90
+ """
91
+ raise NotImplementedError()
92
+
93
+ def get_train_examples(self, data_dir):
94
+ """Gets a collection of [`InputExample`] for the train set."""
95
+ raise NotImplementedError()
96
+
97
+ def get_dev_examples(self, data_dir):
98
+ """Gets a collection of [`InputExample`] for the dev set."""
99
+ raise NotImplementedError()
100
+
101
+ def get_test_examples(self, data_dir):
102
+ """Gets a collection of [`InputExample`] for the test set."""
103
+ raise NotImplementedError()
104
+
105
+ def get_labels(self):
106
+ """Gets the list of labels for this data set."""
107
+ raise NotImplementedError()
108
+
109
+ def tfds_map(self, example):
110
+ """
111
+ Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts
112
+ examples to the correct format.
113
+ """
114
+ if len(self.get_labels()) > 1:
115
+ example.label = self.get_labels()[int(example.label)]
116
+ return example
117
+
118
+ @classmethod
119
+ def _read_tsv(cls, input_file, quotechar=None):
120
+ """Reads a tab separated value file."""
121
+ with open(input_file, "r", encoding="utf-8-sig") as f:
122
+ return list(csv.reader(f, delimiter="\t", quotechar=quotechar))
123
+
124
+
125
+ class SingleSentenceClassificationProcessor(DataProcessor):
126
+ """Generic processor for a single sentence classification data set."""
127
+
128
+ def __init__(self, labels=None, examples=None, mode="classification", verbose=False):
129
+ self.labels = [] if labels is None else labels
130
+ self.examples = [] if examples is None else examples
131
+ self.mode = mode
132
+ self.verbose = verbose
133
+
134
+ def __len__(self):
135
+ return len(self.examples)
136
+
137
+ def __getitem__(self, idx):
138
+ if isinstance(idx, slice):
139
+ return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx])
140
+ return self.examples[idx]
141
+
142
+ @classmethod
143
+ def create_from_csv(
144
+ cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs
145
+ ):
146
+ processor = cls(**kwargs)
147
+ processor.add_examples_from_csv(
148
+ file_name,
149
+ split_name=split_name,
150
+ column_label=column_label,
151
+ column_text=column_text,
152
+ column_id=column_id,
153
+ skip_first_row=skip_first_row,
154
+ overwrite_labels=True,
155
+ overwrite_examples=True,
156
+ )
157
+ return processor
158
+
159
+ @classmethod
160
+ def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs):
161
+ processor = cls(**kwargs)
162
+ processor.add_examples(texts_or_text_and_labels, labels=labels)
163
+ return processor
164
+
165
+ def add_examples_from_csv(
166
+ self,
167
+ file_name,
168
+ split_name="",
169
+ column_label=0,
170
+ column_text=1,
171
+ column_id=None,
172
+ skip_first_row=False,
173
+ overwrite_labels=False,
174
+ overwrite_examples=False,
175
+ ):
176
+ lines = self._read_tsv(file_name)
177
+ if skip_first_row:
178
+ lines = lines[1:]
179
+ texts = []
180
+ labels = []
181
+ ids = []
182
+ for i, line in enumerate(lines):
183
+ texts.append(line[column_text])
184
+ labels.append(line[column_label])
185
+ if column_id is not None:
186
+ ids.append(line[column_id])
187
+ else:
188
+ guid = f"{split_name}-{i}" if split_name else str(i)
189
+ ids.append(guid)
190
+
191
+ return self.add_examples(
192
+ texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples
193
+ )
194
+
195
+ def add_examples(
196
+ self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False
197
+ ):
198
+ if labels is not None and len(texts_or_text_and_labels) != len(labels):
199
+ raise ValueError(
200
+ f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}"
201
+ )
202
+ if ids is not None and len(texts_or_text_and_labels) != len(ids):
203
+ raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}")
204
+ if ids is None:
205
+ ids = [None] * len(texts_or_text_and_labels)
206
+ if labels is None:
207
+ labels = [None] * len(texts_or_text_and_labels)
208
+ examples = []
209
+ added_labels = set()
210
+ for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids):
211
+ if isinstance(text_or_text_and_label, (tuple, list)) and label is None:
212
+ text, label = text_or_text_and_label
213
+ else:
214
+ text = text_or_text_and_label
215
+ added_labels.add(label)
216
+ examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label))
217
+
218
+ # Update examples
219
+ if overwrite_examples:
220
+ self.examples = examples
221
+ else:
222
+ self.examples.extend(examples)
223
+
224
+ # Update labels
225
+ if overwrite_labels:
226
+ self.labels = list(added_labels)
227
+ else:
228
+ self.labels = list(set(self.labels).union(added_labels))
229
+
230
+ return self.examples
231
+
232
+ def get_features(
233
+ self,
234
+ tokenizer,
235
+ max_length=None,
236
+ pad_on_left=False,
237
+ pad_token=0,
238
+ mask_padding_with_zero=True,
239
+ return_tensors=None,
240
+ ):
241
+ """
242
+ Convert examples in a list of `InputFeatures`
243
+
244
+ Args:
245
+ tokenizer: Instance of a tokenizer that will tokenize the examples
246
+ max_length: Maximum example length
247
+ pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default)
248
+ pad_token: Padding token
249
+ mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values
250
+ and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual
251
+ values)
252
+
253
+ Returns:
254
+ If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the
255
+ task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific
256
+ `InputFeatures` which can be fed to the model.
257
+
258
+ """
259
+ if max_length is None:
260
+ max_length = tokenizer.max_len
261
+
262
+ label_map = {label: i for i, label in enumerate(self.labels)}
263
+
264
+ all_input_ids = []
265
+ for ex_index, example in enumerate(self.examples):
266
+ if ex_index % 10000 == 0:
267
+ logger.info(f"Tokenizing example {ex_index}")
268
+
269
+ input_ids = tokenizer.encode(
270
+ example.text_a,
271
+ add_special_tokens=True,
272
+ max_length=min(max_length, tokenizer.max_len),
273
+ )
274
+ all_input_ids.append(input_ids)
275
+
276
+ batch_length = max(len(input_ids) for input_ids in all_input_ids)
277
+
278
+ features = []
279
+ for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)):
280
+ if ex_index % 10000 == 0:
281
+ logger.info(f"Writing example {ex_index}/{len(self.examples)}")
282
+ # The mask has 1 for real tokens and 0 for padding tokens. Only real
283
+ # tokens are attended to.
284
+ attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
285
+
286
+ # Zero-pad up to the sequence length.
287
+ padding_length = batch_length - len(input_ids)
288
+ if pad_on_left:
289
+ input_ids = ([pad_token] * padding_length) + input_ids
290
+ attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask
291
+ else:
292
+ input_ids = input_ids + ([pad_token] * padding_length)
293
+ attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
294
+
295
+ if len(input_ids) != batch_length:
296
+ raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}")
297
+ if len(attention_mask) != batch_length:
298
+ raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}")
299
+
300
+ if self.mode == "classification":
301
+ label = label_map[example.label]
302
+ elif self.mode == "regression":
303
+ label = float(example.label)
304
+ else:
305
+ raise ValueError(self.mode)
306
+
307
+ if ex_index < 5 and self.verbose:
308
+ logger.info("*** Example ***")
309
+ logger.info(f"guid: {example.guid}")
310
+ logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}")
311
+ logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}")
312
+ logger.info(f"label: {example.label} (id = {label})")
313
+
314
+ features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label))
315
+
316
+ if return_tensors is None:
317
+ return features
318
+ elif return_tensors == "tf":
319
+ if not is_tf_available():
320
+ raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported")
321
+ import tensorflow as tf
322
+
323
+ def gen():
324
+ for ex in features:
325
+ yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label)
326
+
327
+ dataset = tf.data.Dataset.from_generator(
328
+ gen,
329
+ ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
330
+ ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])),
331
+ )
332
+ return dataset
333
+ elif return_tensors == "pt":
334
+ if not is_torch_available():
335
+ raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported")
336
+ import torch
337
+ from torch.utils.data import TensorDataset
338
+
339
+ all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
340
+ all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
341
+ if self.mode == "classification":
342
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
343
+ elif self.mode == "regression":
344
+ all_labels = torch.tensor([f.label for f in features], dtype=torch.float)
345
+
346
+ dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels)
347
+ return dataset
348
+ else:
349
+ raise ValueError("return_tensors should be one of 'tf' or 'pt'")
llmeval-env/lib/python3.10/site-packages/transformers/data/processors/xnli.py ADDED
@@ -0,0 +1,97 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ XNLI utils (dataset loading and evaluation)"""
17
+
18
+
19
+ import os
20
+
21
+ from ...utils import logging
22
+ from .utils import DataProcessor, InputExample
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ class XnliProcessor(DataProcessor):
29
+ """
30
+ Processor for the XNLI dataset. Adapted from
31
+ https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207
32
+ """
33
+
34
+ def __init__(self, language, train_language=None):
35
+ self.language = language
36
+ self.train_language = train_language
37
+
38
+ def get_train_examples(self, data_dir):
39
+ """See base class."""
40
+ lg = self.language if self.train_language is None else self.train_language
41
+ lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv"))
42
+ examples = []
43
+ for i, line in enumerate(lines):
44
+ if i == 0:
45
+ continue
46
+ guid = f"train-{i}"
47
+ text_a = line[0]
48
+ text_b = line[1]
49
+ label = "contradiction" if line[2] == "contradictory" else line[2]
50
+ if not isinstance(text_a, str):
51
+ raise ValueError(f"Training input {text_a} is not a string")
52
+ if not isinstance(text_b, str):
53
+ raise ValueError(f"Training input {text_b} is not a string")
54
+ if not isinstance(label, str):
55
+ raise ValueError(f"Training label {label} is not a string")
56
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
57
+ return examples
58
+
59
+ def get_test_examples(self, data_dir):
60
+ """See base class."""
61
+ lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv"))
62
+ examples = []
63
+ for i, line in enumerate(lines):
64
+ if i == 0:
65
+ continue
66
+ language = line[0]
67
+ if language != self.language:
68
+ continue
69
+ guid = f"test-{i}"
70
+ text_a = line[6]
71
+ text_b = line[7]
72
+ label = line[1]
73
+ if not isinstance(text_a, str):
74
+ raise ValueError(f"Training input {text_a} is not a string")
75
+ if not isinstance(text_b, str):
76
+ raise ValueError(f"Training input {text_b} is not a string")
77
+ if not isinstance(label, str):
78
+ raise ValueError(f"Training label {label} is not a string")
79
+ examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
80
+ return examples
81
+
82
+ def get_labels(self):
83
+ """See base class."""
84
+ return ["contradiction", "entailment", "neutral"]
85
+
86
+
87
+ xnli_processors = {
88
+ "xnli": XnliProcessor,
89
+ }
90
+
91
+ xnli_output_modes = {
92
+ "xnli": "classification",
93
+ }
94
+
95
+ xnli_tasks_num_labels = {
96
+ "xnli": 3,
97
+ }
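A minimal usage sketch for the processor above, assuming the XNLI data has been extracted to a local `./xnli_data` directory (the path is illustrative):

from transformers.data.processors.xnli import XnliProcessor

processor = XnliProcessor(language="fr")
labels = processor.get_labels()                         # ["contradiction", "entailment", "neutral"]
examples = processor.get_test_examples("./xnli_data")   # reads XNLI-1.0/xnli.test.tsv
print(labels, len(examples), examples[0].text_a)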
llmeval-env/lib/python3.10/site-packages/transformers/generation/__init__.py ADDED
@@ -0,0 +1,310 @@
1
+ # Copyright 2022 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available
18
+
19
+
20
+ _import_structure = {
21
+ "configuration_utils": ["GenerationConfig", "GenerationMode"],
22
+ "streamers": ["TextIteratorStreamer", "TextStreamer"],
23
+ }
24
+
25
+ try:
26
+ if not is_torch_available():
27
+ raise OptionalDependencyNotAvailable()
28
+ except OptionalDependencyNotAvailable:
29
+ pass
30
+ else:
31
+ _import_structure["beam_constraints"] = [
32
+ "Constraint",
33
+ "ConstraintListState",
34
+ "DisjunctiveConstraint",
35
+ "PhrasalConstraint",
36
+ ]
37
+ _import_structure["beam_search"] = [
38
+ "BeamHypotheses",
39
+ "BeamScorer",
40
+ "BeamSearchScorer",
41
+ "ConstrainedBeamSearchScorer",
42
+ ]
43
+ _import_structure["candidate_generator"] = [
44
+ "AssistedCandidateGenerator",
45
+ "CandidateGenerator",
46
+ "PromptLookupCandidateGenerator",
47
+ ]
48
+ _import_structure["logits_process"] = [
49
+ "AlternatingCodebooksLogitsProcessor",
50
+ "ClassifierFreeGuidanceLogitsProcessor",
51
+ "EncoderNoRepeatNGramLogitsProcessor",
52
+ "EncoderRepetitionPenaltyLogitsProcessor",
53
+ "EpsilonLogitsWarper",
54
+ "EtaLogitsWarper",
55
+ "ExponentialDecayLengthPenalty",
56
+ "ForcedBOSTokenLogitsProcessor",
57
+ "ForcedEOSTokenLogitsProcessor",
58
+ "ForceTokensLogitsProcessor",
59
+ "HammingDiversityLogitsProcessor",
60
+ "InfNanRemoveLogitsProcessor",
61
+ "LogitNormalization",
62
+ "LogitsProcessor",
63
+ "LogitsProcessorList",
64
+ "LogitsWarper",
65
+ "MinLengthLogitsProcessor",
66
+ "MinNewTokensLengthLogitsProcessor",
67
+ "NoBadWordsLogitsProcessor",
68
+ "NoRepeatNGramLogitsProcessor",
69
+ "PrefixConstrainedLogitsProcessor",
70
+ "RepetitionPenaltyLogitsProcessor",
71
+ "SequenceBiasLogitsProcessor",
72
+ "SuppressTokensLogitsProcessor",
73
+ "SuppressTokensAtBeginLogitsProcessor",
74
+ "TemperatureLogitsWarper",
75
+ "TopKLogitsWarper",
76
+ "TopPLogitsWarper",
77
+ "TypicalLogitsWarper",
78
+ "UnbatchedClassifierFreeGuidanceLogitsProcessor",
79
+ "WhisperTimeStampLogitsProcessor",
80
+ ]
81
+ _import_structure["stopping_criteria"] = [
82
+ "MaxNewTokensCriteria",
83
+ "MaxLengthCriteria",
84
+ "MaxTimeCriteria",
85
+ "EosTokenCriteria",
86
+ "StoppingCriteria",
87
+ "StoppingCriteriaList",
88
+ "validate_stopping_criteria",
89
+ ]
90
+ _import_structure["utils"] = [
91
+ "GenerationMixin",
92
+ "GreedySearchEncoderDecoderOutput",
93
+ "GreedySearchDecoderOnlyOutput",
94
+ "SampleEncoderDecoderOutput",
95
+ "SampleDecoderOnlyOutput",
96
+ "BeamSearchEncoderDecoderOutput",
97
+ "BeamSearchDecoderOnlyOutput",
98
+ "BeamSampleEncoderDecoderOutput",
99
+ "BeamSampleDecoderOnlyOutput",
100
+ "ContrastiveSearchEncoderDecoderOutput",
101
+ "ContrastiveSearchDecoderOnlyOutput",
102
+ "GenerateBeamDecoderOnlyOutput",
103
+ "GenerateBeamEncoderDecoderOutput",
104
+ "GenerateDecoderOnlyOutput",
105
+ "GenerateEncoderDecoderOutput",
106
+ ]
107
+
108
+ try:
109
+ if not is_tf_available():
110
+ raise OptionalDependencyNotAvailable()
111
+ except OptionalDependencyNotAvailable:
112
+ pass
113
+ else:
114
+ _import_structure["tf_logits_process"] = [
115
+ "TFForcedBOSTokenLogitsProcessor",
116
+ "TFForcedEOSTokenLogitsProcessor",
117
+ "TFForceTokensLogitsProcessor",
118
+ "TFLogitsProcessor",
119
+ "TFLogitsProcessorList",
120
+ "TFLogitsWarper",
121
+ "TFMinLengthLogitsProcessor",
122
+ "TFNoBadWordsLogitsProcessor",
123
+ "TFNoRepeatNGramLogitsProcessor",
124
+ "TFRepetitionPenaltyLogitsProcessor",
125
+ "TFSuppressTokensAtBeginLogitsProcessor",
126
+ "TFSuppressTokensLogitsProcessor",
127
+ "TFTemperatureLogitsWarper",
128
+ "TFTopKLogitsWarper",
129
+ "TFTopPLogitsWarper",
130
+ ]
131
+ _import_structure["tf_utils"] = [
132
+ "TFGenerationMixin",
133
+ "TFGreedySearchDecoderOnlyOutput",
134
+ "TFGreedySearchEncoderDecoderOutput",
135
+ "TFSampleEncoderDecoderOutput",
136
+ "TFSampleDecoderOnlyOutput",
137
+ "TFBeamSearchEncoderDecoderOutput",
138
+ "TFBeamSearchDecoderOnlyOutput",
139
+ "TFBeamSampleEncoderDecoderOutput",
140
+ "TFBeamSampleDecoderOnlyOutput",
141
+ "TFContrastiveSearchEncoderDecoderOutput",
142
+ "TFContrastiveSearchDecoderOnlyOutput",
143
+ ]
144
+
145
+ try:
146
+ if not is_flax_available():
147
+ raise OptionalDependencyNotAvailable()
148
+ except OptionalDependencyNotAvailable:
149
+ pass
150
+ else:
151
+ _import_structure["flax_logits_process"] = [
152
+ "FlaxForcedBOSTokenLogitsProcessor",
153
+ "FlaxForcedEOSTokenLogitsProcessor",
154
+ "FlaxForceTokensLogitsProcessor",
155
+ "FlaxLogitsProcessor",
156
+ "FlaxLogitsProcessorList",
157
+ "FlaxLogitsWarper",
158
+ "FlaxMinLengthLogitsProcessor",
159
+ "FlaxSuppressTokensAtBeginLogitsProcessor",
160
+ "FlaxSuppressTokensLogitsProcessor",
161
+ "FlaxTemperatureLogitsWarper",
162
+ "FlaxTopKLogitsWarper",
163
+ "FlaxTopPLogitsWarper",
164
+ "FlaxWhisperTimeStampLogitsProcessor",
165
+ "FlaxNoRepeatNGramLogitsProcessor",
166
+ ]
167
+ _import_structure["flax_utils"] = [
168
+ "FlaxGenerationMixin",
169
+ "FlaxGreedySearchOutput",
170
+ "FlaxSampleOutput",
171
+ "FlaxBeamSearchOutput",
172
+ ]
173
+
174
+ if TYPE_CHECKING:
175
+ from .configuration_utils import GenerationConfig, GenerationMode
176
+ from .streamers import TextIteratorStreamer, TextStreamer
177
+
178
+ try:
179
+ if not is_torch_available():
180
+ raise OptionalDependencyNotAvailable()
181
+ except OptionalDependencyNotAvailable:
182
+ pass
183
+ else:
184
+ from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint
185
+ from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer
186
+ from .candidate_generator import AssistedCandidateGenerator, CandidateGenerator, PromptLookupCandidateGenerator
187
+ from .logits_process import (
188
+ AlternatingCodebooksLogitsProcessor,
189
+ ClassifierFreeGuidanceLogitsProcessor,
190
+ EncoderNoRepeatNGramLogitsProcessor,
191
+ EncoderRepetitionPenaltyLogitsProcessor,
192
+ EpsilonLogitsWarper,
193
+ EtaLogitsWarper,
194
+ ExponentialDecayLengthPenalty,
195
+ ForcedBOSTokenLogitsProcessor,
196
+ ForcedEOSTokenLogitsProcessor,
197
+ ForceTokensLogitsProcessor,
198
+ HammingDiversityLogitsProcessor,
199
+ InfNanRemoveLogitsProcessor,
200
+ LogitNormalization,
201
+ LogitsProcessor,
202
+ LogitsProcessorList,
203
+ LogitsWarper,
204
+ MinLengthLogitsProcessor,
205
+ MinNewTokensLengthLogitsProcessor,
206
+ NoBadWordsLogitsProcessor,
207
+ NoRepeatNGramLogitsProcessor,
208
+ PrefixConstrainedLogitsProcessor,
209
+ RepetitionPenaltyLogitsProcessor,
210
+ SequenceBiasLogitsProcessor,
211
+ SuppressTokensAtBeginLogitsProcessor,
212
+ SuppressTokensLogitsProcessor,
213
+ TemperatureLogitsWarper,
214
+ TopKLogitsWarper,
215
+ TopPLogitsWarper,
216
+ TypicalLogitsWarper,
217
+ UnbatchedClassifierFreeGuidanceLogitsProcessor,
218
+ WhisperTimeStampLogitsProcessor,
219
+ )
220
+ from .stopping_criteria import (
221
+ EosTokenCriteria,
222
+ MaxLengthCriteria,
223
+ MaxNewTokensCriteria,
224
+ MaxTimeCriteria,
225
+ StoppingCriteria,
226
+ StoppingCriteriaList,
227
+ validate_stopping_criteria,
228
+ )
229
+ from .utils import (
230
+ BeamSampleDecoderOnlyOutput,
231
+ BeamSampleEncoderDecoderOutput,
232
+ BeamSearchDecoderOnlyOutput,
233
+ BeamSearchEncoderDecoderOutput,
234
+ ContrastiveSearchDecoderOnlyOutput,
235
+ ContrastiveSearchEncoderDecoderOutput,
236
+ GenerateBeamDecoderOnlyOutput,
237
+ GenerateBeamEncoderDecoderOutput,
238
+ GenerateDecoderOnlyOutput,
239
+ GenerateEncoderDecoderOutput,
240
+ GenerationMixin,
241
+ GreedySearchDecoderOnlyOutput,
242
+ GreedySearchEncoderDecoderOutput,
243
+ SampleDecoderOnlyOutput,
244
+ SampleEncoderDecoderOutput,
245
+ )
246
+
247
+ try:
248
+ if not is_tf_available():
249
+ raise OptionalDependencyNotAvailable()
250
+ except OptionalDependencyNotAvailable:
251
+ pass
252
+ else:
253
+ from .tf_logits_process import (
254
+ TFForcedBOSTokenLogitsProcessor,
255
+ TFForcedEOSTokenLogitsProcessor,
256
+ TFForceTokensLogitsProcessor,
257
+ TFLogitsProcessor,
258
+ TFLogitsProcessorList,
259
+ TFLogitsWarper,
260
+ TFMinLengthLogitsProcessor,
261
+ TFNoBadWordsLogitsProcessor,
262
+ TFNoRepeatNGramLogitsProcessor,
263
+ TFRepetitionPenaltyLogitsProcessor,
264
+ TFSuppressTokensAtBeginLogitsProcessor,
265
+ TFSuppressTokensLogitsProcessor,
266
+ TFTemperatureLogitsWarper,
267
+ TFTopKLogitsWarper,
268
+ TFTopPLogitsWarper,
269
+ )
270
+ from .tf_utils import (
271
+ TFBeamSampleDecoderOnlyOutput,
272
+ TFBeamSampleEncoderDecoderOutput,
273
+ TFBeamSearchDecoderOnlyOutput,
274
+ TFBeamSearchEncoderDecoderOutput,
275
+ TFContrastiveSearchDecoderOnlyOutput,
276
+ TFContrastiveSearchEncoderDecoderOutput,
277
+ TFGenerationMixin,
278
+ TFGreedySearchDecoderOnlyOutput,
279
+ TFGreedySearchEncoderDecoderOutput,
280
+ TFSampleDecoderOnlyOutput,
281
+ TFSampleEncoderDecoderOutput,
282
+ )
283
+
284
+ try:
285
+ if not is_flax_available():
286
+ raise OptionalDependencyNotAvailable()
287
+ except OptionalDependencyNotAvailable:
288
+ pass
289
+ else:
290
+ from .flax_logits_process import (
291
+ FlaxForcedBOSTokenLogitsProcessor,
292
+ FlaxForcedEOSTokenLogitsProcessor,
293
+ FlaxForceTokensLogitsProcessor,
294
+ FlaxLogitsProcessor,
295
+ FlaxLogitsProcessorList,
296
+ FlaxLogitsWarper,
297
+ FlaxMinLengthLogitsProcessor,
298
+ FlaxNoRepeatNGramLogitsProcessor,
299
+ FlaxSuppressTokensAtBeginLogitsProcessor,
300
+ FlaxSuppressTokensLogitsProcessor,
301
+ FlaxTemperatureLogitsWarper,
302
+ FlaxTopKLogitsWarper,
303
+ FlaxTopPLogitsWarper,
304
+ FlaxWhisperTimeStampLogitsProcessor,
305
+ )
306
+ from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput
307
+ else:
308
+ import sys
309
+
310
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
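Because of the `_LazyModule` indirection above, heavyweight submodules are only imported when a name is actually accessed; a typical import, shown as a sketch:

# Resolved lazily through _import_structure; torch is only imported when the
# torch-backed names (e.g. LogitsProcessorList) are first accessed.
from transformers.generation import GenerationConfig, LogitsProcessorList

config = GenerationConfig(max_new_tokens=32)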
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.93 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc ADDED
Binary file (28.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc ADDED
Binary file (12.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc ADDED
Binary file (44.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc ADDED
Binary file (21.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc ADDED
Binary file (27.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc ADDED
Binary file (96.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc ADDED
Binary file (8.77 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc ADDED
Binary file (7.79 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc ADDED
Binary file (26.8 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc ADDED
Binary file (104 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc ADDED
Binary file (156 kB). View file
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_constraints.py ADDED
@@ -0,0 +1,521 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import List, Optional
3
+
4
+
5
+ class Constraint(ABC):
6
+ r"""Abstract base class for all constraints that can be applied during generation.
7
+ It must define how the constraint can be satisfied.
8
+
9
+ All classes that inherit Constraint must follow the requirement that
10
+
11
+ ```py
12
+ completed = False
13
+ while not completed:
14
+ _, completed = constraint.update(constraint.advance())
15
+ ```
16
+
17
+ will always terminate (halt).
18
+ """
19
+
20
+ def __init__(self):
21
+ # test for the above condition
22
+ self.test()
23
+
24
+ def test(self):
25
+ """
26
+ Tests whether this constraint has been properly defined.
27
+ """
28
+ counter = 0
29
+ completed = False
30
+ while not completed:
31
+ if counter == 1:
32
+ self.reset()
33
+ advance = self.advance()
34
+ if not self.does_advance(advance):
35
+ raise Exception(
36
+ "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
37
+ )
38
+
39
+ stepped, completed, reset = self.update(advance)
40
+ counter += 1
41
+
42
+ if counter > 10000:
43
+ raise Exception("update() does not fulfill the constraint.")
44
+
45
+ if self.remaining() != 0:
46
+ raise Exception("Custom Constraint is not defined correctly.")
47
+
48
+ @abstractmethod
49
+ def advance(self):
50
+ """
51
+ When called, returns the token that would take this constraint one step closer to being fulfilled.
52
+
53
+ Return:
54
+ token_ids(`torch.tensor`): Must be a tensor or a list of indexable tokens, not a plain integer.
55
+ """
56
+ raise NotImplementedError(
57
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
58
+ )
59
+
60
+ @abstractmethod
61
+ def does_advance(self, token_id: int):
62
+ """
63
+ Reads in a token and returns whether it creates progress.
64
+ """
65
+ raise NotImplementedError(
66
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
67
+ )
68
+
69
+ @abstractmethod
70
+ def update(self, token_id: int):
71
+ """
72
+ Reads in a token and returns booleans that indicate the progress made by it. This function will update the
73
+ state of this object, unlike `does_advance(self, token_id: int)`.
74
+
75
+ This isn't to test whether a certain token will advance the progress; it's to update its state as if it has
76
+ been generated. This becomes important if token_id != desired token (refer to else statement in
77
+ PhrasalConstraint)
78
+
79
+ Args:
80
+ token_id(`int`):
81
+ The id of a newly generated token in the beam search.
82
+ Return:
83
+ stepped(`bool`):
84
+ Whether this constraint has become one step closer to being fulfilled.
85
+ completed(`bool`):
86
+ Whether this constraint has been completely fulfilled by this token being generated.
87
+ reset (`bool`):
88
+ Whether this constraint has reset its progress by this token being generated.
89
+ """
90
+ raise NotImplementedError(
91
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
92
+ )
93
+
94
+ @abstractmethod
95
+ def reset(self):
96
+ """
97
+ Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of
98
+ a constraint is interrupted by an unwanted token.
99
+ """
100
+ raise NotImplementedError(
101
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
102
+ )
103
+
104
+ @abstractmethod
105
+ def remaining(self):
106
+ """
107
+ Returns the number of remaining steps of `advance()` in order to complete this constraint.
108
+ """
109
+ raise NotImplementedError(
110
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
111
+ )
112
+
113
+ @abstractmethod
114
+ def copy(self, stateful=False):
115
+ """
116
+ Creates a new instance of this constraint.
117
+
118
+ Args:
119
+ stateful(`bool`): Whether to copy not only the constraint to the new instance, but also its current state.
120
+
121
+ Return:
122
+ constraint(`Constraint`): A new instance of the same constraint this method is called on.
123
+ """
124
+ raise NotImplementedError(
125
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
126
+ )
127
+
128
+
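# --- Hedged illustration (not part of this file): a minimal Constraint subclass ---
# It shows how the advance()/update() contract described in the class docstring terminates.
# `SingleTokenConstraint` and its single required `token` id are assumptions for this sketch.
class SingleTokenConstraint(Constraint):
    def __init__(self, token: int):
        self.token = token
        self.completed = False
        self.seqlen = 1
        super().__init__()  # runs self.test(), which exercises the update loop above

    def advance(self):
        return None if self.completed else self.token

    def does_advance(self, token_id: int):
        return not self.completed and token_id == self.token

    def update(self, token_id: int):
        if self.does_advance(token_id):
            self.completed = True
            return True, True, False   # stepped, completed, reset
        self.reset()
        return False, False, True

    def reset(self):
        self.completed = False

    def remaining(self):
        return 0 if self.completed else 1

    def copy(self, stateful=False):
        new = SingleTokenConstraint(self.token)
        if stateful:
            new.completed = self.completed
        return new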
129
+ class PhrasalConstraint(Constraint):
130
+ r"""
131
+ [`Constraint`] enforcing that an ordered sequence of tokens is included in the output.
132
+
133
+ Args:
134
+ token_ids (`List[int]`):
135
+ The ids of the tokens, in order, that must be generated by the output.
136
+ """
137
+
138
+ def __init__(self, token_ids: List[int]):
139
+ super(Constraint, self).__init__()
140
+
141
+ if not isinstance(token_ids, list) or len(token_ids) == 0:
142
+ raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
143
+ if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
144
+ raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
145
+
146
+ self.token_ids = token_ids
147
+
148
+ self.seqlen = len(self.token_ids)
149
+ self.fulfilled_idx = -1 # the index of the currently fulfilled step
150
+ self.completed = False
151
+
152
+ def advance(self):
153
+ if self.completed:
154
+ return None
155
+ return self.token_ids[self.fulfilled_idx + 1]
156
+
157
+ def does_advance(self, token_id: int):
158
+ if not isinstance(token_id, int):
159
+ raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
160
+
161
+ if self.completed:
162
+ return False
163
+
164
+ return token_id == self.token_ids[self.fulfilled_idx + 1]
165
+
166
+ def update(self, token_id: int):
167
+ if not isinstance(token_id, int):
168
+ raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
169
+
170
+ stepped = False
171
+ completed = False
172
+ reset = False
173
+
174
+ if self.does_advance(token_id):
175
+ self.fulfilled_idx += 1
176
+ stepped = True
177
+ if self.fulfilled_idx == (self.seqlen - 1):
178
+ completed = True
179
+ self.completed = completed
180
+ else:
181
+ # failed to make progress.
182
+ reset = True
183
+ self.reset()
184
+ return stepped, completed, reset
185
+
186
+ def reset(self):
187
+ self.completed = False
188
+ self.fulfilled_idx = 0
189
+
190
+ def remaining(self):
191
+ return self.seqlen - (self.fulfilled_idx + 1)
192
+
193
+ def copy(self, stateful=False):
194
+ new_constraint = PhrasalConstraint(self.token_ids)
195
+
196
+ if stateful:
197
+ new_constraint.seq_len = self.seqlen
198
+ new_constraint.fulfilled_idx = self.fulfilled_idx
199
+ new_constraint.completed = self.completed
200
+
201
+ return new_constraint
202
+
203
+
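# --- Hedged illustration (not part of this file): how PhrasalConstraint advances ---
# Token ids 5, 6, 7 are illustrative only.
def _demo_phrasal_constraint():
    phrase = PhrasalConstraint([5, 6, 7])
    assert phrase.advance() == 5        # next token needed
    phrase.update(5)
    phrase.update(6)
    assert phrase.remaining() == 1      # only token 7 is still missing
    phrase.update(7)
    assert phrase.completed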
204
+ class DisjunctiveTrie:
205
+ def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
206
+ r"""
207
+ A helper class that builds a trie with the words represented in `nested_token_ids`.
208
+ """
209
+ self.max_height = max([len(one) for one in nested_token_ids])
210
+
211
+ root = {}
212
+ for token_ids in nested_token_ids:
213
+ level = root
214
+ for tidx, token_id in enumerate(token_ids):
215
+ if token_id not in level:
216
+ level[token_id] = {}
217
+
218
+ level = level[token_id]
219
+
220
+ if no_subsets and self.has_subsets(root, nested_token_ids):
221
+ raise ValueError(
222
+ "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
223
+ f" {nested_token_ids}."
224
+ )
225
+
226
+ self.trie = root
227
+
228
+ def next_tokens(self, current_seq):
229
+ """
230
+ The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`.
231
+ """
232
+ start = self.trie
233
+
234
+ for current_token in current_seq:
235
+ start = start[current_token]
236
+
237
+ next_tokens = list(start.keys())
238
+
239
+ return next_tokens
240
+
241
+ def reached_leaf(self, current_seq):
242
+ next_tokens = self.next_tokens(current_seq)
243
+
244
+ return len(next_tokens) == 0
245
+
246
+ def count_leaves(self, root):
247
+ next_nodes = list(root.values())
248
+ if len(next_nodes) == 0:
249
+ return 1
250
+ else:
251
+ return sum([self.count_leaves(nn) for nn in next_nodes])
252
+
253
+ def has_subsets(self, trie, nested_token_ids):
254
+ """
255
+ Returns whether # of leaves == # of words. Otherwise some word is a subset of another.
256
+ """
257
+ leaf_count = self.count_leaves(trie)
258
+ return len(nested_token_ids) != leaf_count
259
+
260
+
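# --- Hedged illustration (not part of this file): DisjunctiveTrie lookups ---
# Token ids are illustrative only.
def _demo_disjunctive_trie():
    trie = DisjunctiveTrie([[1, 2, 3], [1, 4]])
    assert trie.next_tokens([]) == [1]
    assert trie.next_tokens([1]) == [2, 4]
    assert trie.reached_leaf([1, 4])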
261
+ class DisjunctiveConstraint(Constraint):
262
+ r"""
263
+ A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints.
264
+
265
+ Args:
266
+ nested_token_ids (`List[List[int]]`):
267
+ A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from
268
+ the list of words.
269
+ """
270
+
271
+ def __init__(self, nested_token_ids: List[List[int]]):
272
+ super(Constraint, self).__init__()
273
+
274
+ if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
275
+ raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
276
+ if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
277
+ raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
278
+ if any(
279
+ any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
280
+ for token_ids in nested_token_ids
281
+ ):
282
+ raise ValueError(
283
+ f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
284
+ )
285
+
286
+ self.trie = DisjunctiveTrie(nested_token_ids)
287
+ self.token_ids = nested_token_ids
288
+
289
+ self.seqlen = self.trie.max_height
290
+ self.current_seq = []
291
+ self.completed = False
292
+
293
+ def advance(self):
294
+ token_list = self.trie.next_tokens(self.current_seq)
295
+
296
+ if len(token_list) == 0:
297
+ return None
298
+ else:
299
+ return token_list
300
+
301
+ def does_advance(self, token_id: int):
302
+ if not isinstance(token_id, int):
303
+ raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
304
+
305
+ next_tokens = self.trie.next_tokens(self.current_seq)
306
+
307
+ return token_id in next_tokens
308
+
309
+ def update(self, token_id: int):
310
+ if not isinstance(token_id, int):
311
+ raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")
312
+
313
+ stepped = False
314
+ completed = False
315
+ reset = False
316
+
317
+ if self.does_advance(token_id):
318
+ self.current_seq.append(token_id)
319
+ stepped = True
320
+ else:
321
+ reset = True
322
+ self.reset()
323
+
324
+ completed = self.trie.reached_leaf(self.current_seq)
325
+ self.completed = completed
326
+
327
+ return stepped, completed, reset
328
+
329
+ def reset(self):
330
+ self.completed = False
331
+ self.current_seq = []
332
+
333
+ def remaining(self):
334
+ if self.completed:
335
+ # since this can be completed without reaching max height
336
+ return 0
337
+ else:
338
+ return self.seqlen - len(self.current_seq)
339
+
340
+ def copy(self, stateful=False):
341
+ new_constraint = DisjunctiveConstraint(self.token_ids)
342
+
343
+ if stateful:
344
+ new_constraint.seq_len = self.seqlen
345
+ new_constraint.current_seq = self.current_seq
346
+ new_constraint.completed = self.completed
347
+
348
+ return new_constraint
349
+
350
+
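# --- Hedged illustration (not part of this file): DisjunctiveConstraint with two alternatives ---
# Token ids are illustrative; the constraint is satisfied by either [11, 12] or [11, 13, 14].
def _demo_disjunctive_constraint():
    alt = DisjunctiveConstraint([[11, 12], [11, 13, 14]])
    assert alt.advance() == [11]
    alt.update(11)
    assert alt.advance() == [12, 13]
    alt.update(12)
    assert alt.completed    # fulfilled by the shorter alternative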
351
+ class ConstraintListState:
352
+ r"""
353
+ A class for beam scorers to track its progress through a list of constraints.
354
+
355
+ Args:
356
+ constraints (`List[Constraint]`):
357
+ A list of [`Constraint`] objects that must be fulfilled by the beam scorer.
358
+ """
359
+
360
+ def __init__(self, constraints: List[Constraint]):
361
+ self.constraints = constraints
362
+
363
+ # max # of steps required to fulfill a given constraint
364
+ self.max_seqlen = max([c.seqlen for c in constraints])
365
+ self.n_constraints = len(constraints)
366
+ self.completed = False
367
+
368
+ self.init_state()
369
+
370
+ def init_state(self):
371
+ self.complete_constraints = []
372
+ self.inprogress_constraint = None
373
+ self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]
374
+
375
+ def get_bank(self):
376
+ add = 0
377
+ if self.inprogress_constraint:
378
+ # extra points for having a constraint mid-fulfilled
379
+ add += self.max_seqlen - self.inprogress_constraint.remaining()
380
+
381
+ return (len(self.complete_constraints) * self.max_seqlen) + add
382
+
383
+ def advance(self):
384
+ """The list of tokens to generate such that we can make progress.
385
+ By "list" we don't mean the list of token that will fully fulfill a constraint.
386
+
387
+ Given constraints `c_i = {t_ij | j == # of tokens}`, if we're not in the middle of progressing through a
388
+ specific constraint `c_i`, we return:
389
+
390
+ `[t_k1 for k in indices of unfulfilled constraints]`
391
+
392
+ If we are in the middle of a constraint, then we return:
393
+ `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint.
394
+
395
+ Though we don't care which constraint is fulfilled first, if we are in the process of fulfilling a constraint,
396
+ that's the only one we'll return.
397
+ """
398
+ token_list = []
399
+ if self.inprogress_constraint is None:
400
+ for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
401
+ advance = constraint.advance()
402
+ if isinstance(advance, int):
403
+ token_list.append(advance)
404
+ elif isinstance(advance, list):
405
+ token_list.extend(advance)
406
+ else:
407
+ advance = self.inprogress_constraint.advance()
408
+ if isinstance(advance, int):
409
+ token_list.append(advance)
410
+ elif isinstance(advance, list):
411
+ token_list.extend(advance)
412
+
413
+ if len(token_list) == 0:
414
+ return None
415
+ else:
416
+ return token_list
417
+
418
+ def reset(self, token_ids: Optional[List[int]]):
419
+ """
420
+ token_ids: the tokens generated thus far to reset the state of the progress through constraints.
421
+ """
422
+ self.init_state()
423
+
424
+ if token_ids is not None:
425
+ for token in token_ids:
426
+ # completes or steps **one** constraint
427
+ complete, stepped = self.add(token)
428
+
429
+ # the entire list of constraints are fulfilled
430
+ if self.completed:
431
+ break
432
+
433
+ def add(self, token_id: int):
434
+ if not isinstance(token_id, int):
435
+ raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
436
+
437
+ complete, stepped = False, False
438
+
439
+ if self.completed:
440
+ complete = True
441
+ stepped = False
442
+ return complete, stepped
443
+
444
+ if self.inprogress_constraint is not None:
445
+ # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
446
+ # job, simply update the state
447
+
448
+ stepped, complete, reset = self.inprogress_constraint.update(token_id)
449
+ if reset:
450
+ # 1. If the next token breaks the progress, then we must restart.
451
+ # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
452
+
453
+ # But that doesn't mean we self.init_state(), since we only reset the state for this particular
454
+ # constraint, not the full list of constraints.
455
+
456
+ self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
457
+ self.inprogress_constraint = None
458
+
459
+ if complete:
460
+ # 2. If the next token completes the constraint, move it to completed list, set
461
+ # inprogress to None. If there are no pending constraints either, then this full list of constraints
462
+ # is complete.
463
+
464
+ self.complete_constraints.append(self.inprogress_constraint)
465
+ self.inprogress_constraint = None
466
+
467
+ if len(self.pending_constraints) == 0:
468
+ # we're done!
469
+ self.completed = True
470
+
471
+ else:
472
+ # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
473
+ # of constraints?
474
+
475
+ for cidx, pending_constraint in enumerate(self.pending_constraints):
476
+ if pending_constraint.does_advance(token_id):
477
+ stepped, complete, reset = pending_constraint.update(token_id)
478
+
479
+ if not stepped:
480
+ raise Exception(
481
+ "`constraint.update(token_id)` is not yielding incremental progress, "
482
+ "even though `constraint.does_advance(token_id)` is true."
483
+ )
484
+
485
+ if complete:
486
+ self.complete_constraints.append(pending_constraint)
487
+ self.inprogress_constraint = None
488
+
489
+ if not complete and stepped:
490
+ self.inprogress_constraint = pending_constraint
491
+
492
+ if complete or stepped:
493
+ # If we made any progress at all, then it's at least not a "pending constraint".
494
+
495
+ self.pending_constraints = (
496
+ self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
497
+ )
498
+
499
+ if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
500
+ # If there's no longer any pending after this and no inprogress either, then we must be
501
+ # complete.
502
+
503
+ self.completed = True
504
+
505
+ break # prevent accidentally stepping through multiple constraints with just one token.
506
+
507
+ return complete, stepped
508
+
509
+ def copy(self, stateful=True):
510
+ new_state = ConstraintListState(self.constraints)  # we never actually mutate the self.constraints objects
511
+ # throughout this process, so they remain in their initialization state.
512
+
513
+ if stateful:
514
+ new_state.complete_constraints = [
515
+ constraint.copy(stateful=True) for constraint in self.complete_constraints
516
+ ]
517
+ if self.inprogress_constraint is not None:
518
+ new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
519
+ new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
520
+
521
+ return new_state
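A hedged end-to-end sketch of how ConstraintListState tracks several constraints during decoding (token ids are illustrative):

state = ConstraintListState([PhrasalConstraint([5, 6]), PhrasalConstraint([7])])
print(state.advance())   # -> [5, 7]: the first token of every pending constraint
state.add(5)             # starts fulfilling the first constraint
print(state.advance())   # -> [6]: only the in-progress constraint is offered
state.add(6)
state.add(7)
print(state.completed)   # -> True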
llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_search.py ADDED
@@ -0,0 +1,1005 @@
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Inc. team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from abc import ABC, abstractmethod
17
+ from collections import UserDict
18
+ from typing import Dict, List, Optional, Tuple, Union
19
+
20
+ import numpy as np
21
+ import torch
22
+
23
+ from ..utils import add_start_docstrings
24
+ from .beam_constraints import Constraint, ConstraintListState
25
+
26
+
27
+ PROCESS_INPUTS_DOCSTRING = r"""
28
+ Args:
29
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
30
+ Indices of input sequence tokens in the vocabulary.
31
+
32
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
33
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
34
+
35
+ [What are input IDs?](../glossary#input-ids)
36
+ next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
37
+ Current scores of the top `2 * num_beams` non-finished beam hypotheses.
38
+ next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
39
+ `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
40
+ next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
41
+ Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
42
+ pad_token_id (`int`, *optional*):
43
+ The id of the *padding* token.
44
+ eos_token_id (`Union[int, List[int]]`, *optional*):
45
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
46
+ beam_indices (`torch.LongTensor`, *optional*):
47
+ Beam indices indicating to which beam hypothesis each token corresponds.
48
+ group_index (`int`, *optional*):
49
+ The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`].
50
+
51
+ Return:
52
+ `UserDict`: A dictionary composed of the fields as defined above:
53
+
54
+ - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all
55
+ non-finished beams.
56
+ - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added
57
+ to the non-finished beam_hypotheses.
58
+ - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
59
+ indicating to which beam the next tokens shall be added.
60
+
61
+ """
62
+
63
+ FINALIZE_INPUTS_DOCSTRING = r"""
64
+ Args:
65
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
66
+ Indices of input sequence tokens in the vocabulary.
67
+
68
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
69
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
70
+
71
+ [What are input IDs?](../glossary#input-ids)
72
+ final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
73
+ The final scores of all non-finished beams.
74
+ final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
75
+ The last tokens to be added to the non-finished beam_hypotheses.
76
+ final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`):
77
+ The beam indices indicating to which beam the `final_beam_tokens` shall be added.
78
+ pad_token_id (`int`, *optional*):
79
+ The id of the *padding* token.
80
+ eos_token_id (`Union[int, List[int]]`, *optional*):
81
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
82
+
83
+ Return:
84
+ `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences.
85
+ The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early
86
+ due to the `eos_token_id`.
87
+
88
+ """
89
+
90
+
91
+ class BeamScorer(ABC):
92
+ """
93
+ Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and
94
+ [`~PreTrainedModel.beam_sample`].
95
+ """
96
+
97
+ @abstractmethod
98
+ @add_start_docstrings(PROCESS_INPUTS_DOCSTRING)
99
+ def process(
100
+ self,
101
+ input_ids: torch.LongTensor,
102
+ next_scores: torch.FloatTensor,
103
+ next_tokens: torch.LongTensor,
104
+ next_indices: torch.LongTensor,
105
+ **kwargs,
106
+ ) -> Tuple[torch.Tensor]:
107
+ raise NotImplementedError("This is an abstract method.")
108
+
109
+ @abstractmethod
110
+ @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING)
111
+ def finalize(
112
+ self,
113
+ input_ids: torch.LongTensor,
114
+ next_scores: torch.FloatTensor,
115
+ next_tokens: torch.LongTensor,
116
+ next_indices: torch.LongTensor,
117
+ max_length: int,
118
+ **kwargs,
119
+ ) -> torch.LongTensor:
120
+ raise NotImplementedError("This is an abstract method.")
121
+
122
+
123
+ class BeamSearchScorer(BeamScorer):
124
+ r"""
125
+ [`BeamScorer`] implementing standard beam search decoding.
126
+
127
+ Adapted in part from [Facebook's XLM beam search
128
+ code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529).
129
+
130
+ Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS
131
+ implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua)
132
+
133
+ Args:
134
+ batch_size (`int`):
135
+ Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
136
+ num_beams (`int`):
137
+ Number of beams for beam search.
138
+ device (`torch.device`):
139
+ Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be
140
+ allocated.
141
+ length_penalty (`float`, *optional*, defaults to 1.0):
142
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
143
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
144
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
145
+ `length_penalty` < 0.0 encourages shorter sequences.
146
+ do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
147
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
148
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an
149
+ heuristic is applied and the generation stops when is it very unlikely to find better candidates;
150
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
151
+ beam search algorithm).
152
+ num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
153
+ The number of beam hypotheses that shall be returned upon calling
154
+ [`~transformers.BeamSearchScorer.finalize`].
155
+ num_beam_groups (`int`, *optional*, defaults to 1):
156
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
157
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
158
+ max_length (`int`, *optional*):
159
+ The maximum length of the sequence to be generated.
160
+ """
161
+
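# Hedged worked example of the length penalty described above (values illustrative):
#     normalized_score = sum_logprobs / (sequence_length ** length_penalty)
# e.g. sum_logprobs = -6.0, length = 8, length_penalty = 1.0  ->  -6.0 / 8  = -0.75
# with length_penalty = 2.0 the same hypothesis scores           -6.0 / 64 = -0.09375,
# i.e. longer sequences are penalized less (scores are negative log likelihoods).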
162
+ def __init__(
163
+ self,
164
+ batch_size: int,
165
+ num_beams: int,
166
+ device: torch.device,
167
+ length_penalty: Optional[float] = 1.0,
168
+ do_early_stopping: Optional[Union[bool, str]] = False,
169
+ num_beam_hyps_to_keep: Optional[int] = 1,
170
+ num_beam_groups: Optional[int] = 1,
171
+ max_length: Optional[int] = None,
172
+ ):
173
+ self.num_beams = num_beams
174
+ self.device = device
175
+ self.length_penalty = length_penalty
176
+ self.do_early_stopping = do_early_stopping
177
+ self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
178
+ self.num_beam_groups = num_beam_groups
179
+ self.group_size = self.num_beams // self.num_beam_groups
180
+
181
+ self._is_init = False
182
+ # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch.
183
+ # If group_beam_search is not used, the list consists of `batch_size` beam_hyps.
184
+ self._beam_hyps = [
185
+ BeamHypotheses(
186
+ num_beams=self.group_size,
187
+ length_penalty=self.length_penalty,
188
+ early_stopping=self.do_early_stopping,
189
+ max_length=max_length,
190
+ )
191
+ for _ in range(batch_size * self.num_beam_groups)
192
+ ]
193
+ # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group
194
+ # in the i-th mini-batch is complete.
195
+ self._done = torch.tensor(
196
+ [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device
197
+ )
198
+
199
+ if not isinstance(num_beams, int) or num_beams <= 1:
200
+ raise ValueError(
201
+ f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
202
+ " one should make use of `greedy_search` instead."
203
+ )
204
+
205
+ if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
206
+ raise ValueError(
207
+ "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
208
+ f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
209
+ )
210
+
211
+ @property
212
+ def is_done(self) -> bool:
213
+ return self._done.all()
214
+
215
+ def process(
216
+ self,
217
+ input_ids: torch.LongTensor,
218
+ next_scores: torch.FloatTensor,
219
+ next_tokens: torch.LongTensor,
220
+ next_indices: torch.LongTensor,
221
+ pad_token_id: Optional[int] = None,
222
+ eos_token_id: Optional[Union[int, List[int]]] = None,
223
+ beam_indices: Optional[torch.LongTensor] = None,
224
+ group_index: Optional[int] = 0,
225
+ decoder_prompt_len: Optional[int] = 0,
226
+ ) -> Dict[str, torch.Tensor]:
227
+ # add up to the length which the next_scores is calculated on (including decoder prompt)
228
+ cur_len = input_ids.shape[-1] + 1
229
+ batch_size = len(self._beam_hyps) // self.num_beam_groups
230
+
231
+ if not (batch_size == (input_ids.shape[0] // self.group_size)):
232
+ if self.num_beam_groups > 1:
233
+ raise ValueError(
234
+ f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
235
+ f"size of {self.group_size} is expected by the beam scorer."
236
+ )
237
+ else:
238
+ raise ValueError(
239
+ f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
240
+ f"{self.group_size} is expected by the beam scorer."
241
+ )
242
+
243
+ device = input_ids.device
244
+ next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
245
+ next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
246
+ next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
247
+
248
+ if isinstance(eos_token_id, int):
249
+ eos_token_id = [eos_token_id]
250
+
251
+ for batch_idx in range(batch_size):
252
+ batch_group_idx = batch_idx * self.num_beam_groups + group_index
253
+ if self._done[batch_group_idx]:
254
+ if self.num_beams < len(self._beam_hyps[batch_group_idx]):
255
+ raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
256
+ if eos_token_id is None or pad_token_id is None:
257
+ raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
258
+ # pad the batch
259
+ next_beam_scores[batch_idx, :] = 0
260
+ next_beam_tokens[batch_idx, :] = pad_token_id
261
+ next_beam_indices[batch_idx, :] = 0
262
+ continue
263
+
264
+ # next tokens for this sentence
265
+ beam_idx = 0
266
+ for beam_token_rank, (next_token, next_score, next_index) in enumerate(
267
+ zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
268
+ ):
269
+ batch_beam_idx = batch_idx * self.group_size + next_index
270
+ # add to generated hypotheses if end of sentence
271
+ if (eos_token_id is not None) and (next_token.item() in eos_token_id):
272
+ # if beam_token does not belong to top num_beams tokens, it should not be added
273
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
274
+ if is_beam_token_worse_than_top_num_beams:
275
+ continue
276
+ if beam_indices is not None:
277
+ beam_index = beam_indices[batch_beam_idx]
278
+ beam_index = beam_index + (batch_beam_idx,)
279
+ else:
280
+ beam_index = None
281
+
282
+ self._beam_hyps[batch_group_idx].add(
283
+ input_ids[batch_beam_idx].clone(),
284
+ next_score.item(),
285
+ beam_indices=beam_index,
286
+ generated_len=cur_len - decoder_prompt_len,
287
+ )
288
+ else:
289
+ # add next predicted token since it is not eos_token
290
+ next_beam_scores[batch_idx, beam_idx] = next_score
291
+ next_beam_tokens[batch_idx, beam_idx] = next_token
292
+ next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
293
+ beam_idx += 1
294
+
295
+ # once the beam for next step is full, don't add more tokens to it.
296
+ if beam_idx == self.group_size:
297
+ break
298
+
299
+ if beam_idx < self.group_size:
300
+ raise ValueError(
301
+ f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
302
+ f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
303
+ )
304
+
305
+ # Check if we are done so that we can save a pad step if all(done)
306
+ self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done(
307
+ next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
308
+ )
309
+
310
+ return UserDict(
311
+ {
312
+ "next_beam_scores": next_beam_scores.view(-1),
313
+ "next_beam_tokens": next_beam_tokens.view(-1),
314
+ "next_beam_indices": next_beam_indices.view(-1),
315
+ }
316
+ )
317
+
318
+ def finalize(
319
+ self,
320
+ input_ids: torch.LongTensor,
321
+ final_beam_scores: torch.FloatTensor,
322
+ final_beam_tokens: torch.LongTensor,
323
+ final_beam_indices: torch.LongTensor,
324
+ max_length: int,
325
+ pad_token_id: Optional[int] = None,
326
+ eos_token_id: Optional[Union[int, List[int]]] = None,
327
+ beam_indices: Optional[torch.LongTensor] = None,
328
+ decoder_prompt_len: Optional[int] = 0,
329
+ ) -> Tuple[torch.LongTensor]:
330
+ batch_size = len(self._beam_hyps) // self.num_beam_groups
331
+
332
+ if isinstance(eos_token_id, int):
333
+ eos_token_id = [eos_token_id]
334
+
335
+ # finalize all open beam hypotheses and add to generated hypotheses
336
+ for batch_group_idx, beam_hyp in enumerate(self._beam_hyps):
337
+ if self._done[batch_group_idx]:
338
+ continue
339
+
340
+ # all open beam hypotheses are added to the beam hypothesis
341
+ # beam hypothesis class automatically keeps the best beams
342
+ for index_per_group in range(self.group_size):
343
+ batch_beam_idx = batch_group_idx * self.group_size + index_per_group
344
+ final_score = final_beam_scores[batch_beam_idx].item()
345
+ final_tokens = input_ids[batch_beam_idx]
346
+ beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
347
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
348
+ beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
349
+
350
+ # select the best hypotheses
351
+ sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
352
+ best = []
353
+ best_indices = []
354
+ best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
355
+
356
+ # retrieve best hypotheses
357
+ for i in range(batch_size):
358
+ beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups]
359
+ candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams]
360
+ sorted_hyps = sorted(candidate_beams, key=lambda x: x[0])
361
+ for j in range(self.num_beam_hyps_to_keep):
362
+ best_hyp_tuple = sorted_hyps.pop()
363
+ best_score = best_hyp_tuple[0]
364
+ best_hyp = best_hyp_tuple[1]
365
+ best_index = best_hyp_tuple[2]
366
+ sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
367
+
368
+ # append hyp to lists
369
+ best.append(best_hyp)
370
+
371
+ # append indices to list
372
+ best_indices.append(best_index)
373
+
374
+ best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
375
+
376
+ # prepare for adding eos
377
+ sent_lengths_max = sent_lengths.max().item() + 1
378
+ sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
379
+ decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
380
+
381
+ if len(best_indices) > 0 and best_indices[0] is not None:
382
+ indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
383
+ else:
384
+ indices = None
385
+
386
+ # shorter batches are padded if needed
387
+ if sent_lengths.min().item() != sent_lengths.max().item():
388
+ if pad_token_id is None:
389
+ raise ValueError("`pad_token_id` has to be defined")
390
+ decoded.fill_(pad_token_id)
391
+
392
+ if indices is not None:
393
+ indices.fill_(-1)
394
+
395
+ # fill with hypotheses and eos_token_id if the latter fits in
396
+ for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
397
+ decoded[i, : sent_lengths[i]] = hypo
398
+
399
+ if indices is not None:
400
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
401
+
402
+ if sent_lengths[i] < sent_max_len:
403
+ # inserting only the first eos_token_id
404
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
405
+
406
+ return UserDict(
407
+ {
408
+ "sequences": decoded,
409
+ "sequence_scores": best_scores,
410
+ "beam_indices": indices,
411
+ }
412
+ )
413
+
414
+
415
+ class ConstrainedBeamSearchScorer(BeamScorer):
416
+ r"""
417
+ [`BeamScorer`] implementing constrained beam search decoding.
418
+
419
+
420
+ Args:
421
+ batch_size (`int`):
422
+ Batch Size of `input_ids` for which standard beam search decoding is run in parallel.
423
+ num_beams (`int`):
424
+ Number of beams for beam search.
425
+ constraints (`List[Constraint]`):
426
+ A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation
427
+ output. For more information, the documentation of [`Constraint`] should be read.
428
+ device (`torch.device`):
429
+ Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `ConstrainedBeamSearchScorer` will be
430
+ allocated.
431
+ length_penalty (`float`, *optional*, defaults to 1.0):
432
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
433
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
434
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
435
+ `length_penalty` < 0.0 encourages shorter sequences.
436
+ do_early_stopping (`bool` or `str`, *optional*, defaults to `False`):
437
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
438
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where a
439
+ heuristic is applied and the generation stops when it is very unlikely to find better candidates;
440
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
441
+ beam search algorithm).
442
+ num_beam_hyps_to_keep (`int`, *optional*, defaults to 1):
443
+ The number of beam hypotheses that shall be returned upon calling
444
+ [`~transformers.BeamSearchScorer.finalize`].
445
+ num_beam_groups (`int`, *optional*, defaults to 1):
446
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
447
+ See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
448
+ max_length (`int`, *optional*):
449
+ The maximum length of the sequence to be generated.
450
+ """
451
+
452
+ def __init__(
453
+ self,
454
+ batch_size: int,
455
+ num_beams: int,
456
+ constraints: List[Constraint],
457
+ device: torch.device,
458
+ length_penalty: Optional[float] = 1.0,
459
+ do_early_stopping: Optional[Union[bool, str]] = False,
460
+ num_beam_hyps_to_keep: Optional[int] = 1,
461
+ num_beam_groups: Optional[int] = 1,
462
+ max_length: Optional[int] = None,
463
+ ):
464
+ self.num_beams = num_beams
465
+ self.device = device
466
+ self.length_penalty = length_penalty
467
+ self.do_early_stopping = do_early_stopping
468
+ self.num_beam_hyps_to_keep = num_beam_hyps_to_keep
469
+ self.num_beam_groups = num_beam_groups
470
+ self.group_size = self.num_beams // self.num_beam_groups
471
+ self.constraints = constraints
472
+
473
+ self._is_init = False
474
+ self._beam_hyps = [
475
+ BeamHypotheses(
476
+ num_beams=self.num_beams,
477
+ length_penalty=self.length_penalty,
478
+ early_stopping=self.do_early_stopping,
479
+ max_length=max_length,
480
+ )
481
+ for _ in range(batch_size)
482
+ ]
483
+ self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device)
484
+
485
+ if not isinstance(num_beams, int) or num_beams <= 1:
486
+ raise ValueError(
487
+ f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1,"
488
+ " one should make use of `greedy_search` instead."
489
+ )
490
+
491
+ if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0):
492
+ raise ValueError(
493
+ "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be"
494
+ f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}."
495
+ )
496
+
497
+ @property
498
+ def is_done(self) -> bool:
499
+ return self._done.all()
500
+
501
+ def make_constraint_states(self, n):
502
+ return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)]
503
+
504
+ def check_completes_constraints(self, sequence):
505
+ new_state = self.make_constraint_states(1)[0]
506
+ new_state.reset(sequence)
507
+ return new_state.completed
508
+
509
+ def process(
510
+ self,
511
+ input_ids: torch.LongTensor,
512
+ next_scores: torch.FloatTensor,
513
+ next_tokens: torch.LongTensor,
514
+ next_indices: torch.LongTensor,
515
+ scores_for_all_vocab: torch.FloatTensor,
516
+ pad_token_id: Optional[int] = None,
517
+ eos_token_id: Optional[Union[int, List[int]]] = None,
518
+ beam_indices: Optional[torch.LongTensor] = None,
519
+ decoder_prompt_len: Optional[int] = 0,
520
+ ) -> Tuple[torch.Tensor]:
521
+ r"""
522
+ Args:
523
+ input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`):
524
+ Indices of input sequence tokens in the vocabulary.
525
+
526
+ Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See
527
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
528
+
529
+ [What are input IDs?](../glossary#input-ids)
530
+ next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`):
531
+ Current scores of the top `2 * num_beams` non-finished beam hypotheses.
532
+ next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
533
+ `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses.
534
+ next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`):
535
+ Beam indices indicating to which beam hypothesis the `next_tokens` correspond.
536
+ scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, vocab_size)`):
537
+ The scores of all tokens in the vocabulary for each of the beam hypotheses.
538
+ pad_token_id (`int`, *optional*):
539
+ The id of the *padding* token.
540
+ eos_token_id (`Union[int, List[int]]`, *optional*):
541
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
542
+ beam_indices (`torch.LongTensor`, *optional*):
543
+ Beam indices indicating to which beam hypothesis each token corresponds.
544
+ decoder_prompt_len (`int`, *optional*):
545
+ The length of prompt that is included in the input to decoder.
546
+ Return:
547
+ `UserDict`: A dictionary composed of the fields as defined above:
548
+
549
+ - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of
550
+ all
551
+ non-finished beams.
552
+
553
+ - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be
554
+ added
555
+ to the non-finished beam_hypotheses.
556
+ - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices
557
+ indicating to which beam the next tokens shall be added.
558
+ """
559
+
560
+ # add up to the length which the next_scores is calculated on (including decoder prompt)
561
+ cur_len = input_ids.shape[-1] + 1
562
+ batch_size = len(self._beam_hyps)
563
+ if not (batch_size == (input_ids.shape[0] // self.group_size)):
564
+ if self.num_beam_groups > 1:
565
+ raise ValueError(
566
+ f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam "
567
+ f"size of {self.group_size} is expected by the beam scorer."
568
+ )
569
+ else:
570
+ raise ValueError(
571
+ f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of "
572
+ f"{self.group_size} is expected by the beam scorer."
573
+ )
574
+
575
+ device = input_ids.device
576
+
577
+ next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device)
578
+ next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device)
579
+ next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device)
580
+
581
+ if isinstance(eos_token_id, int):
582
+ eos_token_id = [eos_token_id]
583
+
584
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
585
+ if self._done[batch_idx]:
586
+ if self.num_beams < len(beam_hyp):
587
+ raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated")
588
+ if eos_token_id is None or pad_token_id is None:
589
+ raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined")
590
+ # pad the batch
591
+ next_beam_scores[batch_idx, :] = 0
592
+ next_beam_tokens[batch_idx, :] = pad_token_id
593
+ next_beam_indices[batch_idx, :] = 0
594
+ continue
595
+
596
+ # next tokens for this sentence.
597
+ beam_idx = 0
598
+ for beam_token_rank, (next_token, next_score, next_index) in enumerate(
599
+ zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx])
600
+ ):
601
+ batch_beam_idx = batch_idx * self.group_size + next_index
602
+ # add to generated hypotheses if end of sentence
603
+ if (eos_token_id is not None) and (next_token.item() in eos_token_id):
604
+ # if beam_token does not belong to top num_beams tokens, it should not be added
605
+ is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size
606
+ if is_beam_token_worse_than_top_num_beams:
607
+ continue
608
+
609
+ completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist())
610
+ if completes_constraint:
611
+ if beam_indices is not None:
612
+ beam_index = beam_indices[batch_beam_idx]
613
+ beam_index = beam_index + (batch_beam_idx,)
614
+ else:
615
+ beam_index = None
616
+
617
+ beam_hyp.add(
618
+ input_ids[batch_beam_idx].clone(),
619
+ next_score.item(),
620
+ beam_indices=beam_index,
621
+ generated_len=cur_len - decoder_prompt_len,
622
+ )
623
+ else:
624
+ # add next predicted token since it is not eos_token
625
+ next_beam_scores[batch_idx, beam_idx] = next_score
626
+ next_beam_tokens[batch_idx, beam_idx] = next_token
627
+ next_beam_indices[batch_idx, beam_idx] = batch_beam_idx
628
+ beam_idx += 1
629
+
630
+ # once the beam for next step is full, don't add more tokens to it.
631
+ if beam_idx == self.group_size:
632
+ break
633
+
634
+ new_scores, new_tokens, new_indices = self.step_sentence_constraint(
635
+ batch_idx,
636
+ input_ids,
637
+ scores_for_all_vocab,
638
+ next_beam_scores[batch_idx],
639
+ next_beam_tokens[batch_idx],
640
+ next_beam_indices[batch_idx],
641
+ )
642
+
643
+ next_beam_scores[batch_idx] = new_scores
644
+ next_beam_tokens[batch_idx] = new_tokens
645
+ next_beam_indices[batch_idx] = new_indices
646
+
647
+ if beam_idx < self.group_size:
648
+ raise ValueError(
649
+ f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:"
650
+ f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected."
651
+ )
652
+
653
+ # Check if we are done so that we can save a pad step if all(done)
654
+ self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done(
655
+ next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len
656
+ )
657
+
658
+ return UserDict(
659
+ {
660
+ "next_beam_scores": next_beam_scores.view(-1),
661
+ "next_beam_tokens": next_beam_tokens.view(-1),
662
+ "next_beam_indices": next_beam_indices.view(-1),
663
+ }
664
+ )
665
+
666
+ def step_sentence_constraint(
667
+ self,
668
+ batch_idx: int,
669
+ input_ids: torch.LongTensor,
670
+ vocab_scores: torch.FloatTensor,
671
+ sent_beam_scores: torch.FloatTensor,
672
+ sent_beam_tokens: torch.LongTensor,
673
+ sent_beam_indices: torch.LongTensor,
674
+ push_progress: bool = False,
675
+ ):
676
+ # sent_beam_tokens are the next {num_beams} number of tokens that are under consideration for this beam
677
+ # (candidate next tokens)
678
+
679
+ # 1. Adding "advance_tokens"
680
+ # using ConstraintListState.advance(), we propose new tokens to be added into this "candidate list" that will
681
+ # advance us in fulfilling the constraints.
682
+
683
+ # 2. Selecting best candidates such that we end up with highest probable candidates
684
+ # that fulfill our constraints.
685
+
686
+ orig_len = sent_beam_indices.size(0)
687
+ device = sent_beam_indices.device
688
+
689
+ # initialize states
690
+ topk_constraint_states = self.make_constraint_states(orig_len)
691
+ advance_constraint_states = self.make_constraint_states(orig_len)
692
+
693
+ sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len
694
+ this_batch_input_ids = input_ids[sidx:eidx]
695
+ this_batch_token_scores = vocab_scores[sidx:eidx]
696
+ full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1)
697
+
698
+ # need to make new hypothesis that advance the constraints
699
+ track_new = {
700
+ "new_seqs": full_hypotheses.tolist(),
701
+ "new_states": [],
702
+ "new_indices": [],
703
+ "new_tokens": [],
704
+ "new_scores": [],
705
+ }
706
+ for seq_idx, pre_seq in enumerate(this_batch_input_ids):
707
+ # pre_seq = ith sequence generated before this step.
708
+
709
+ # input_ids -> (topk) generic beam search best model next tokens
710
+ # -> (advance) constraints forcing the next token
711
+ # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of
712
+ # hypotheses.
713
+
714
+ topk_state = topk_constraint_states[seq_idx]
715
+ topk_state.reset(full_hypotheses[seq_idx].cpu().tolist())
716
+
717
+ advance_state = advance_constraint_states[seq_idx]
718
+ advance_state.reset(pre_seq.cpu().tolist())
719
+
720
+ if not advance_state.completed:
721
+ advance_tokens = torch.LongTensor(advance_state.advance()).to(device)
722
+ for advance_token in advance_tokens:
723
+ # since adding each `advance_token` leads to a different hypothesis, create new state instance.
724
+ new_state = advance_state.copy(stateful=True)
725
+ new_state.add(advance_token.cpu().tolist())
726
+
727
+ advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist()
728
+ if advance_seq not in track_new["new_seqs"]:
729
+ # prevent duplicates, which are basically bound to happen in this process.
730
+ track_new["new_seqs"].append(advance_seq)
731
+ track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches
732
+ track_new["new_tokens"].append(advance_token)
733
+ track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token))
734
+ track_new["new_states"].append(new_state)
735
+ elif push_progress:
736
+ # Basically, `sent_beam_indices` often selects only a few of the `input_ids` generated sequences that
737
+ # actually fulfill our constraints. For example, let constraints == ["loves pies"] and
738
+
739
+ # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and"
740
+
741
+ # Without this step, if `sent_beam_indices` is something like [1,1], then
742
+ # 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and
743
+ # 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is
744
+ # the else part of `if constraints_completed[seq_idx]`)
745
+ # 3. it ends up simply getting removed from consideration.
746
+
747
+ # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways,
748
+ # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam
749
+ # search times, since completed sequences keep getting removed after all this effort for constrained
750
+ # generation.
751
+
752
+ # Here, we basically take `pre_seq_1` and "push" it into the considered list of hypotheses, by simply
753
+ # appending the next likely token in the vocabulary and adding it to the list of hypotheses.
754
+
755
+ new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token
756
+ advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1)
757
+
758
+ advance_state = advance_constraint_states[seq_idx]
759
+
760
+ advance_seq = advance_seq.cpu().tolist()
761
+
762
+ advance_state.reset(advance_seq)
763
+ if advance_seq not in track_new["new_seqs"]:
764
+ # but still don't want to have duplicates
765
+ track_new["new_seqs"].append(advance_seq)
766
+ track_new["new_indices"].append(seq_idx)
767
+ track_new["new_tokens"].append(new_token)
768
+ track_new["new_scores"].append(new_score)
769
+ track_new["new_states"].append(advance_state)
770
+
771
+ if len(track_new["new_indices"]) > 0:
772
+ new_indices = torch.tensor(track_new["new_indices"]).to(device)
773
+ new_tokens = torch.stack(track_new["new_tokens"]).to(device)
774
+ new_scores = torch.stack(track_new["new_scores"]).to(device)
775
+
776
+ all_states = topk_constraint_states + track_new["new_states"]
777
+ all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1)
778
+ all_scores = torch.cat((sent_beam_scores, new_scores), -1)
779
+ all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device)
780
+
781
+ zipped = all_banks * 100 + all_scores
782
+ indices = zipped.sort(descending=True).indices
783
+ sorted_banks = all_banks[indices]
784
+
785
+ # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0}
786
+
787
+ counter = -1
788
+ cur_bank = sorted_banks[0]
789
+ increments = []
790
+ for bank in sorted_banks:
791
+ if bank == cur_bank:
792
+ counter += 1
793
+ else:
794
+ counter = 0
795
+ cur_bank = bank
796
+ increments.append(counter)
797
+ rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
798
+
799
+ indices = indices[rearrangers][:orig_len]
800
+
801
+ sent_beam_scores = all_scores[indices]
802
+ sent_beam_tokens = all_tokens[indices]
803
+ sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices]
804
+
805
+ return sent_beam_scores, sent_beam_tokens, sent_beam_indices
806
+
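The method above ranks candidates first by how many constraint "banks" they have completed and only then by score, and it uses a stable argsort over per-bank counters so the kept beams interleave across banks. A minimal standalone sketch of that reordering logic follows; the bank counts and scores are made-up illustration data, not values taken from the library.

    # Hypothetical illustration of the bank-aware reordering used in step_sentence_constraint.
    import numpy as np
    import torch

    all_banks = torch.tensor([2, 0, 2, 1, 1])                    # completed-constraint count per candidate
    all_scores = torch.tensor([-1.2, -0.3, -2.0, -0.8, -1.5])    # beam scores per candidate

    # Primary key: bank (weighted by 100), secondary key: score.
    zipped = all_banks * 100 + all_scores
    indices = zipped.sort(descending=True).indices
    sorted_banks = all_banks[indices]

    # Count position within each bank, then stable-sort by that counter so the final
    # order interleaves banks: best of bank 2, best of bank 1, best of bank 0, ...
    counter, cur_bank, increments = -1, sorted_banks[0], []
    for bank in sorted_banks:
        if bank == cur_bank:
            counter += 1
        else:
            counter, cur_bank = 0, bank
        increments.append(counter)
    rearrangers = torch.tensor(np.argsort(increments, kind="mergesort"))
    print(indices[rearrangers][:3])  # indices of the candidates that would be kept

With the illustrative numbers above the kept candidates are the best of bank 2, then bank 1, then bank 0, which is exactly the "{sorted among bank C}, {sorted among bank C-1}, ..." ordering described in the comments.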
807
+ def finalize(
808
+ self,
809
+ input_ids: torch.LongTensor,
810
+ final_beam_scores: torch.FloatTensor,
811
+ final_beam_tokens: torch.LongTensor,
812
+ final_beam_indices: torch.LongTensor,
813
+ max_length: int,
814
+ pad_token_id: Optional[int] = None,
815
+ eos_token_id: Optional[Union[int, List[int]]] = None,
816
+ beam_indices: Optional[torch.LongTensor] = None,
817
+ decoder_prompt_len: Optional[int] = 0,
818
+ ) -> Tuple[torch.LongTensor]:
819
+ batch_size = len(self._beam_hyps)
820
+
821
+ if isinstance(eos_token_id, int):
822
+ eos_token_id = [eos_token_id]
823
+
824
+ # finalize all open beam hypotheses and add to generated hypotheses
825
+ for batch_idx, beam_hyp in enumerate(self._beam_hyps):
826
+ if self._done[batch_idx]:
827
+ continue
828
+
829
+ # all open beam hypotheses are added to the beam hypothesis
830
+ # beam hypothesis class automatically keeps the best beams
831
+
832
+ ids_collect = []
833
+ for beam_id in range(self.num_beams):
834
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
835
+ final_score = final_beam_scores[batch_beam_idx].item()
836
+ final_tokens = input_ids[batch_beam_idx]
837
+
838
+ completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist())
839
+ if completes_constraint:
840
+ beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None
841
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
842
+ beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len)
843
+ ids_collect.append(beam_id)
844
+
845
+ # due to overly complex constraints or other factors, sometimes we can't guarantee a successful
846
+ # generation. In these cases we simply return the highest scoring outputs.
847
+ if len(ids_collect) < self.num_beam_hyps_to_keep:
848
+ for beam_id in range(self.num_beams):
849
+ if beam_id not in ids_collect:
850
+ batch_beam_idx = batch_idx * self.num_beams + beam_id
851
+ final_score = final_beam_scores[batch_beam_idx].item()
852
+ final_tokens = input_ids[batch_beam_idx]
853
+ generated_len = final_tokens.shape[-1] - decoder_prompt_len
854
+ beam_hyp.add(final_tokens, final_score, generated_len=generated_len)
855
+ if len(ids_collect) >= self.num_beam_hyps_to_keep:
856
+ break
857
+
858
+ # select the best hypotheses
859
+ sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep)
860
+ best = []
861
+ best_indices = []
862
+ best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32)
863
+
864
+ # retrieve best hypotheses
865
+ for i, beam_hyp in enumerate(self._beam_hyps):
866
+ sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0])
867
+ for j in range(self.num_beam_hyps_to_keep):
868
+ best_hyp_tuple = sorted_hyps.pop()
869
+ best_score = best_hyp_tuple[0]
870
+ best_hyp = best_hyp_tuple[1]
871
+ best_index = best_hyp_tuple[2]
872
+ sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp)
873
+
874
+ # append to lists
875
+ best.append(best_hyp)
876
+
877
+ # append indices to list
878
+ best_indices.append(best_index)
879
+
880
+ best_scores[i * self.num_beam_hyps_to_keep + j] = best_score
881
+
882
+ # prepare for adding eos
883
+ sent_lengths_max = sent_lengths.max().item() + 1
884
+
885
+ sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max
886
+ decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
887
+
888
+ if len(best_indices) > 0 and best_indices[0] is not None:
889
+ indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len)
890
+ else:
891
+ indices = None
892
+
893
+ # shorter batches are padded if needed
894
+ if sent_lengths.min().item() != sent_lengths.max().item():
895
+ if pad_token_id is None:
896
+ raise ValueError("`pad_token_id` has to be defined")
897
+ decoded.fill_(pad_token_id)
898
+
899
+ if indices is not None:
900
+ indices.fill_(-1)
901
+
902
+ # fill with hypotheses and eos_token_id if the latter fits in
903
+ for i, (hypo, best_idx) in enumerate(zip(best, best_indices)):
904
+ decoded[i, : sent_lengths[i]] = hypo
905
+
906
+ if indices is not None:
907
+ indices[i, : len(best_idx)] = torch.tensor(best_idx)
908
+
909
+ if sent_lengths[i] < sent_max_len:
910
+ # inserting only the first eos_token_id
911
+ decoded[i, sent_lengths[i]] = eos_token_id[0]
912
+
913
+ return UserDict(
914
+ {
915
+ "sequences": decoded,
916
+ "sequence_scores": best_scores,
917
+ "beam_indices": indices,
918
+ }
919
+ )
920
+
921
+
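For context, a short, hedged sketch of how this scorer is normally reached through the public API: passing `constraints` (or `force_words_ids`) to `generate()` makes it build a `ConstrainedBeamSearchScorer` internally. The checkpoint name below is only an example.

    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

    tokenizer = AutoTokenizer.from_pretrained("t5-small")          # example checkpoint
    model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

    inputs = tokenizer("translate English to German: The child loves pies.", return_tensors="pt")

    # Force a phrase to appear in the output; generate() constructs the
    # ConstrainedBeamSearchScorer when constraints are passed.
    constraint = PhrasalConstraint(tokenizer("Kind", add_special_tokens=False).input_ids)
    outputs = model.generate(**inputs, num_beams=4, constraints=[constraint], max_new_tokens=30)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))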
922
+ class BeamHypotheses:
923
+ def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None):
924
+ """
925
+ Initialize n-best list of hypotheses.
926
+ """
927
+ self.length_penalty = length_penalty
928
+ self.early_stopping = early_stopping
929
+ self.max_length = max_length
930
+ self.num_beams = num_beams
931
+ self.beams = []
932
+ self.worst_score = 1e9
933
+
934
+ if not isinstance(self.early_stopping, bool) and self.max_length is None:
935
+ raise ValueError(
936
+ "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the"
937
+ " BeamScorer class instance at initialization time."
938
+ )
939
+
940
+ def __len__(self):
941
+ """
942
+ Number of hypotheses in the list.
943
+ """
944
+ return len(self.beams)
945
+
946
+ def add(
947
+ self,
948
+ hyp: torch.LongTensor,
949
+ sum_logprobs: float,
950
+ beam_indices: Optional[torch.LongTensor] = None,
951
+ generated_len: Optional[int] = None,
952
+ ):
953
+ """
954
+ Add a new hypothesis to the list.
955
+ """
956
+ if generated_len is not None:
957
+ score = sum_logprobs / (generated_len**self.length_penalty)
958
+ # This 'else' case exists for retrocompatibility
959
+ else:
960
+ score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty)
961
+
962
+ if len(self) < self.num_beams or score > self.worst_score:
963
+ self.beams.append((score, hyp, beam_indices))
964
+ if len(self) > self.num_beams:
965
+ sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)])
966
+ del self.beams[sorted_next_scores[0][1]]
967
+ self.worst_score = sorted_next_scores[1][0]
968
+ else:
969
+ self.worst_score = min(score, self.worst_score)
970
+
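`add` above divides the summed log-probabilities by `generated_len ** length_penalty`, so the exponent decides whether longer hypotheses are favored. A tiny worked example (the numbers are illustrative only):

    # Illustration of the length-penalty scoring used in BeamHypotheses.add.
    sum_logprobs, generated_len = -6.0, 12

    for length_penalty in (0.0, 1.0, 2.0):
        score = sum_logprobs / (generated_len ** length_penalty)
        print(length_penalty, round(score, 4))
    # 0.0 -> -6.0      (raw sum; shorter sequences tend to win)
    # 1.0 -> -0.5      (average log-prob per generated token)
    # 2.0 -> -0.0417   (strongly favors longer sequences)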
971
+ def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool:
972
+ """
973
+ If there are enough hypotheses and none of the hypotheses being generated can become better than the worst
974
+ one in the heap, then we are done with this sentence.
975
+ """
976
+
977
+ if len(self) < self.num_beams:
978
+ return False
979
+
980
+ # `True`: stop as soon as at least `num_beams` hypotheses are finished
981
+ if self.early_stopping is True:
982
+ return True
983
+ # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate
984
+ # when `length_penalty` is positive. See the discussion below for more details.
985
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
986
+ elif self.early_stopping is False:
987
+ highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
988
+ ret = self.worst_score >= highest_attainable_score
989
+ return ret
990
+ # `"never"`: compute the best possible score, depending on the signal of `length_penalty`
991
+ else:
992
+ # `length_penalty` > 0.0 -> max denominator is obtained from `max_length`, not from `cur_len` -> min
993
+ # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain
994
+ # its max this way
995
+ if self.length_penalty > 0.0:
996
+ if self.max_length <= decoder_prompt_len:
997
+ raise ValueError("max_length is not larger than decoder prompt length")
998
+ highest_attainable_score = (
999
+ best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty
1000
+ )
1001
+ # the opposite logic applies here (max `highest_attainable_score` from `cur_len`)
1002
+ else:
1003
+ highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty
1004
+ ret = self.worst_score >= highest_attainable_score
1005
+ return ret
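The three `early_stopping` branches above differ only in which length is plugged into the denominator when estimating the best score an unfinished beam could still reach. A small sketch of that comparison under the `False` heuristic; all values below are made up.

    # Heuristic used when early_stopping is False: an unfinished beam is assumed to finish
    # at the current length, so its best attainable score is bounded by
    # best_sum_logprobs / (cur_len - decoder_prompt_len) ** length_penalty.
    worst_kept_score = -0.55            # worst hypothesis already in the n-best list
    best_sum_logprobs = -6.0            # best running sum among unfinished beams
    cur_len, decoder_prompt_len, length_penalty = 14, 2, 1.0

    highest_attainable = best_sum_logprobs / (cur_len - decoder_prompt_len) ** length_penalty
    done = worst_kept_score >= highest_attainable
    print(highest_attainable, done)     # -0.5, False -> keep searching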
llmeval-env/lib/python3.10/site-packages/transformers/generation/candidate_generator.py ADDED
@@ -0,0 +1,425 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import copy
17
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple
18
+
19
+ import torch
20
+
21
+ from ..cache_utils import DynamicCache
22
+
23
+
24
+ if TYPE_CHECKING:
25
+ from ..modeling_utils import PreTrainedModel
26
+ from .configuration_utils import GenerationConfig
27
+ from .logits_process import LogitsProcessorList
28
+
29
+
30
+ class CandidateGenerator:
31
+ """Abstract base class for all candidate generators that can be applied during assisted generation."""
32
+
33
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
34
+ """
35
+ Fetches the candidates to be tried for the current input.
36
+
37
+ Args:
38
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
39
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
40
+
41
+ Return:
42
+ `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
43
+ assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length,
44
+ vocabulary_size)` containing the logits associated to each candidate.
45
+ """
46
+ raise NotImplementedError(
47
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`."
48
+ )
49
+
50
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
51
+ """
52
+ Updates the candidate generation strategy based on the outcomes.
53
+
54
+ Args:
55
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
56
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
57
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
58
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
59
+ beam search or log softmax for each vocabulary token when using beam search.
60
+ num_matches (`int`):
61
+ The number of matches between the candidate sequences and the model predictions.
62
+ """
63
+ raise NotImplementedError(
64
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can call "
65
+ "`update_candidate_strategy`."
66
+ )
67
+
68
+
69
+ class AssistedCandidateGenerator(CandidateGenerator):
70
+ """
71
+ `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates
72
+ candidates through the use of a smaller model. Read the following blog post for more information:
73
+ https://huggingface.co/blog/assisted-generation
74
+
75
+ Args:
76
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
77
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
78
+ assistant_model (`PreTrainedModel`):
79
+ The model to be used for generating candidates. This model should be smaller than the main model.
80
+ generation_config (`~generation.GenerationConfig`, *optional*):
81
+ The generation configuration to be used as base parametrization for the generation call.
82
+ logits_processor (`LogitsProcessorList`):
83
+ An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
84
+ used to modify the prediction scores of the language modeling head applied at each generation step.
85
+ model_kwargs (`Dict`):
86
+ The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
87
+ model as well.
88
+ inputs_tensor (`torch.Tensor`, *optional*):
89
+ The model input tensor. In encoder-decoder models, this is the encoder input.
90
+ """
91
+
92
+ def __init__(
93
+ self,
94
+ input_ids: torch.LongTensor,
95
+ assistant_model: "PreTrainedModel",
96
+ generation_config: "GenerationConfig",
97
+ logits_processor: "LogitsProcessorList",
98
+ model_kwargs: Dict,
99
+ inputs_tensor: Optional[torch.Tensor] = None,
100
+ ):
101
+ # Make sure all data is on the same device as the assistant model
102
+ device = assistant_model.device
103
+ input_ids = input_ids.to(device)
104
+ if inputs_tensor is not None:
105
+ inputs_tensor = inputs_tensor.to(device)
106
+
107
+ # Prepare the assistant and the starting number of candidate tokens
108
+ self.assistant_model = assistant_model
109
+ self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens
110
+
111
+ # Prepare the kwargs for the assistant model
112
+ assistant_kwargs = {}
113
+ for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads
114
+ if key not in ("encoder_outputs", "assistant_encoder_outputs"):
115
+ assistant_kwargs[key] = (
116
+ value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value)
117
+ )
118
+
119
+ if "assistant_encoder_outputs" in model_kwargs:
120
+ assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"]
121
+ elif assistant_model.config.is_encoder_decoder:
122
+ inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs(
123
+ inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs
124
+ )
125
+ assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(
126
+ inputs_tensor, assistant_kwargs, model_input_name
127
+ )
128
+ elif "encoder_outputs" in model_kwargs:
129
+ assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"]
130
+ self.assistant_kwargs = assistant_kwargs
131
+
132
+ # Prepare assistant model's keys of inputs
133
+ if assistant_model.config.is_encoder_decoder:
134
+ # both are encoder-decoder
135
+ self.input_ids_key = "decoder_input_ids"
136
+ elif "encoder_outputs" in assistant_kwargs:
137
+ # special case for encoder-decoder with decoder-only assistant (like DistilWhisper)
138
+ self.input_ids_key = "input_ids"
139
+ self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get(
140
+ "decoder_attention_mask",
141
+ torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long),
142
+ )
143
+ else:
144
+ # both are decoder-only
145
+ self.input_ids_key = "input_ids"
146
+
147
+ # Prepare generation-related options.
148
+ self.logits_processor = logits_processor
149
+ self.generation_config = copy.deepcopy(generation_config)
150
+ self.generation_config.return_dict_in_generate = True
151
+ self.generation_config.output_scores = True
152
+
153
+ # avoid unnecessary warnings that min_length is larger than max_new_tokens
154
+ self.main_model_min_length = self.generation_config.min_length
155
+ self.generation_config.min_length = 0
156
+ self.generation_config.min_new_tokens = None
157
+
158
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
159
+ """
160
+ Fetches the candidates to be tried for the current input.
161
+
162
+ Args:
163
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
164
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
165
+
166
+ Return:
167
+ `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
168
+ assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
169
+ vocabulary_size)` containing the logits associated to each candidate.
170
+ """
171
+ input_ids = input_ids.to(self.assistant_model.device)
172
+
173
+ # Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
174
+ new_cur_len = input_ids.shape[-1]
175
+ max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1)
176
+ min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0)
177
+ if max_new_tokens == 0:
178
+ return input_ids, None
179
+
180
+ # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length
181
+ # (which implicitly contains the number of accepted candidates from the previous round)
182
+ has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None
183
+ if has_past_key_values:
184
+ new_cache_size = new_cur_len - 1
185
+ self.assistant_kwargs["past_key_values"] = _crop_past_key_values(
186
+ self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1
187
+ ) # the assistant does not have the token after the last match, hence the -1
188
+
189
+ self.assistant_kwargs = _prepare_attention_mask(
190
+ self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder
191
+ )
192
+ self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len)
193
+
194
+ # 2. Forecast next N tokens using the assistant model.
195
+ assistant_generation_kwargs = {
196
+ self.input_ids_key: input_ids,
197
+ "min_new_tokens": min_new_tokens,
198
+ "max_new_tokens": max_new_tokens,
199
+ "generation_config": self.generation_config,
200
+ "logits_processor": self.logits_processor,
201
+ }
202
+
203
+ assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs)
204
+
205
+ # 3. Update variables for the next round of candidate generation
206
+ self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values
207
+
208
+ # 4. Prepare variables for output
209
+ candidate_logits = torch.stack(assistant_output.scores, dim=1)
210
+ candidate_ids = assistant_output.sequences
211
+ return candidate_ids, candidate_logits
212
+
213
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
214
+ """
215
+ Updates the candidate generation strategy based on the outcomes.
216
+
217
+ Args:
218
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
219
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
220
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
221
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
222
+ beam search or log softmax for each vocabulary token when using beam search.
223
+ num_matches (`int`):
224
+ The number of matches between the candidate sequences and the model predictions.
225
+ """
226
+ # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic,
227
+ # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the
228
+ # cost of forecasting incorrect assistant tokens.
229
+ if self.assistant_model.generation_config.num_assistant_tokens_schedule in {
230
+ "heuristic",
231
+ "heuristic_transient",
232
+ }:
233
+ if num_matches == int(self.num_assistant_tokens):
234
+ self.num_assistant_tokens += 2.0
235
+ else:
236
+ self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0)
237
+
238
+
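For orientation, a hedged sketch of how `AssistedCandidateGenerator` is usually triggered from user code: passing `assistant_model` to `generate()` enables assisted decoding, with the smaller model drafting candidate tokens that the target model then verifies. The checkpoint names below are only examples of a compatible target/draft pair.

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2-large")        # target model (example)
    model = AutoModelForCausalLM.from_pretrained("gpt2-large")
    assistant = AutoModelForCausalLM.from_pretrained("gpt2")       # smaller draft model (example)

    inputs = tokenizer("The capital of France is", return_tensors="pt")

    # generate() instantiates an AssistedCandidateGenerator internally when
    # assistant_model is provided; the draft/verify loop is transparent to the caller.
    outputs = model.generate(**inputs, assistant_model=assistant, max_new_tokens=20)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))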
239
+ class PromptLookupCandidateGenerator(CandidateGenerator):
240
+ """
241
+ `CandidateGenerator` class to be used for prompt lookup generation. This class generates candidates by looking up
242
+ likely continuations in the provided prompt (input_ids) itself.
243
+ Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding
244
+
245
+ Args:
246
+ max_matching_ngram_size (`int`):
247
+ The maximum ngram size to be considered for matching in the prompt
248
+ num_output_tokens (`int`):
249
+ The number of tokens to be output as candidate tokens.
250
+ max_length (`int`):
251
+ The maximum total number of tokens that can be generated. For decoder-only models, that includes the prompt length.
252
+ Defaults to 20, which is the max length used as default in generation config.
253
+ """
254
+
255
+ def __init__(
256
+ self,
257
+ num_output_tokens: int = 10,
258
+ max_matching_ngram_size: int = None,
259
+ max_length: int = 20,
260
+ ):
261
+ self.num_output_tokens = num_output_tokens
262
+ self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2
263
+ self.max_length = max_length
264
+
265
+ if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0:
266
+ raise ValueError("Invalid max_matching_ngram_size or num_output_tokens")
267
+
268
+ def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
269
+ """
270
+ Fetches the candidates to be tried for the current input.
271
+
272
+ Args:
273
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
274
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
275
+
276
+ Return:
277
+ `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.
278
+ """
279
+ input_length = input_ids.size(1)
280
+
281
+ # Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
282
+ if self.max_length == input_length + 1:
283
+ return input_ids, None
284
+
285
+ chosen_ids = None
286
+ match_found = False
287
+ for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):
288
+ # Create sliding windows of size ngram_size
289
+ windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
290
+
291
+ # Convert ngram to a tensor for comparison
292
+ ngram_tensor = input_ids[0, -ngram_size:]
293
+
294
+ # Find where the windows match the ngram
295
+ matches = (windows == ngram_tensor).all(dim=2)
296
+
297
+ # Get the indices of matches
298
+ match_indices = matches.nonzero(as_tuple=True)[1]
299
+
300
+ # Iterate through match indices to find a valid continuation
301
+ for idx in match_indices:
302
+ start_idx = idx + ngram_size
303
+ end_idx = start_idx + self.num_output_tokens
304
+ end_idx = min(end_idx, input_length, self.max_length)
305
+
306
+ if start_idx < end_idx:
307
+ chosen_ids = input_ids[0, start_idx:end_idx]
308
+ match_found = True
309
+ break
310
+ if match_found:
311
+ break
312
+
313
+ if chosen_ids is None or len(chosen_ids) == 0:
314
+ # In case we didn't find a match, return the input sequence unchanged, which reverts to autoregressive decoding
315
+ return input_ids, None
316
+
317
+ # Now we need to extend input_ids with chosen_ids
318
+ chosen_ids = chosen_ids.unsqueeze(0)
319
+ candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)
320
+ # assisted_generation expects logits as well, but we don't have those here, so returning None
321
+ return candidate_input_ids, None
322
+
323
+ def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
324
+ """
325
+ Updates the candidate generation strategy based on the outcomes.
326
+
327
+ Args:
328
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
329
+ Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
330
+ scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
331
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
332
+ beam search or log softmax for each vocabulary token when using beam search.
333
+ num_matches (`int`):
334
+ The number of matches between the candidate sequences and the model predictions.
335
+ """
336
+ # Currently does nothing
337
+ return
338
+
339
+
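Similarly, prompt lookup decoding is reached by passing `prompt_lookup_num_tokens` to `generate()`; no second model is needed because candidates are copied from earlier n-gram matches in the prompt itself. The checkpoint name below is illustrative.

    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("gpt2")              # example checkpoint
    model = AutoModelForCausalLM.from_pretrained("gpt2")

    prompt = "The quick brown fox jumps over the lazy dog. The quick brown"
    inputs = tokenizer(prompt, return_tensors="pt")

    # generate() builds a PromptLookupCandidateGenerator internally when
    # prompt_lookup_num_tokens is set; candidates come from repeated n-grams in the prompt.
    outputs = model.generate(**inputs, prompt_lookup_num_tokens=5, max_new_tokens=15)
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))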
340
+ def _crop_past_key_values(model, past_key_values, maximum_length):
341
+ """Crops the past key values up to a certain maximum length."""
342
+ new_past = []
343
+ if model.config.is_encoder_decoder:
344
+ for idx in range(len(past_key_values)):
345
+ new_past.append(
346
+ (
347
+ past_key_values[idx][0][:, :, :maximum_length, :],
348
+ past_key_values[idx][1][:, :, :maximum_length, :],
349
+ past_key_values[idx][2],
350
+ past_key_values[idx][3],
351
+ )
352
+ )
353
+ past_key_values = tuple(new_past)
354
+ # bloom is special
355
+ elif "bloom" in model.__class__.__name__.lower() or (
356
+ model.config.architectures is not None and "bloom" in model.config.architectures[0].lower()
357
+ ):
358
+ for idx in range(len(past_key_values)):
359
+ new_past.append(
360
+ (
361
+ past_key_values[idx][0][:, :, :maximum_length],
362
+ past_key_values[idx][1][:, :maximum_length, :],
363
+ )
364
+ )
365
+ past_key_values = tuple(new_past)
366
+ # gptbigcode is too
367
+ elif "gptbigcode" in model.__class__.__name__.lower() or (
368
+ model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower()
369
+ ):
370
+ if model.config.multi_query:
371
+ for idx in range(len(past_key_values)):
372
+ past_key_values[idx] = past_key_values[idx][:, :maximum_length, :]
373
+ else:
374
+ for idx in range(len(past_key_values)):
375
+ past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :]
376
+ elif isinstance(past_key_values, DynamicCache):
377
+ for idx in range(len(past_key_values.key_cache)):
378
+ if past_key_values.value_cache[idx].shape[-1] != 0:
379
+ past_key_values.key_cache[idx] = past_key_values.key_cache[idx][:, :, :maximum_length, :]
380
+ past_key_values.value_cache[idx] = past_key_values.value_cache[idx][:, :, :maximum_length, :]
381
+
382
+ elif past_key_values is not None:
383
+ for idx in range(len(past_key_values)):
384
+ new_past.append(
385
+ (
386
+ past_key_values[idx][0][:, :, :maximum_length, :],
387
+ past_key_values[idx][1][:, :, :maximum_length, :],
388
+ )
389
+ )
390
+ past_key_values = tuple(new_past)
391
+ return past_key_values
392
+
393
+
394
+ def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]:
395
+ """Expands or crops the model's mask for decoding purposes, to the defined length"""
396
+
397
+ mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask"
398
+ if mask_key not in model_kwargs:
399
+ return model_kwargs
400
+
401
+ mask = model_kwargs[mask_key]
402
+ mask_length_diff = new_length - mask.shape[1]
403
+
404
+ if mask_length_diff < 0:
405
+ model_kwargs[mask_key] = mask[:, :mask_length_diff]
406
+ elif mask_length_diff > 0:
407
+ model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1)
408
+ return model_kwargs
409
+
410
+
411
+ def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]:
412
+ """Expands or crops the model's token_type_ids for decoding purposes, to the defined length"""
413
+ if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None:
414
+ return model_kwargs
415
+
416
+ token_type_ids = model_kwargs["token_type_ids"]
417
+ final_token_type = token_type_ids[:, -1].unsqueeze(-1)
418
+ type_length_diff = new_length - token_type_ids.shape[1]
419
+
420
+ if type_length_diff < 0:
421
+ token_type_ids = token_type_ids[:, :type_length_diff]
422
+ elif type_length_diff > 0:
423
+ token_type_copies = final_token_type.repeat(1, type_length_diff)
424
+ model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1)
425
+ return model_kwargs
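A minimal illustration of what `_prepare_attention_mask` does to a decoder-only mask: it either crops the mask or right-pads it with ones so its length matches the number of tokens the assistant will see next. The tensor values are illustrative.

    import torch

    # Same expand/crop behavior as _prepare_attention_mask for a decoder-only model.
    mask = torch.ones((1, 6), dtype=torch.long)        # mask for 6 already-processed tokens
    new_length = 9                                     # the assistant will now see 9 tokens

    diff = new_length - mask.shape[1]
    if diff < 0:
        mask = mask[:, :diff]                          # crop extra positions
    elif diff > 0:
        mask = torch.cat([mask, mask.new_ones((mask.shape[0], diff))], dim=-1)
    print(mask.shape)                                  # torch.Size([1, 9])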
llmeval-env/lib/python3.10/site-packages/transformers/generation/configuration_utils.py ADDED
@@ -0,0 +1,1092 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Generation configuration class and utilities."""
16
+
17
+ import copy
18
+ import json
19
+ import os
20
+ import warnings
21
+ from typing import TYPE_CHECKING, Any, Dict, Optional, Union
22
+
23
+ from .. import __version__
24
+ from ..configuration_utils import PretrainedConfig
25
+ from ..utils import (
26
+ GENERATION_CONFIG_NAME,
27
+ ExplicitEnum,
28
+ PushToHubMixin,
29
+ cached_file,
30
+ download_url,
31
+ extract_commit_hash,
32
+ is_remote_url,
33
+ logging,
34
+ )
35
+
36
+
37
+ if TYPE_CHECKING:
38
+ from ..modeling_utils import PreTrainedModel
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+ METADATA_FIELDS = ("_from_model_config", "_commit_hash", "_original_object_hash", "transformers_version")
43
+
44
+
45
+ class GenerationMode(ExplicitEnum):
46
+ """
47
+ Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method.
48
+ """
49
+
50
+ # Non-beam methods
51
+ CONTRASTIVE_SEARCH = "contrastive_search"
52
+ GREEDY_SEARCH = "greedy_search"
53
+ SAMPLE = "sample"
54
+ ASSISTED_GENERATION = "assisted_generation"
55
+ # Beam methods
56
+ BEAM_SEARCH = "beam_search"
57
+ BEAM_SAMPLE = "beam_sample"
58
+ CONSTRAINED_BEAM_SEARCH = "constrained_beam_search"
59
+ GROUP_BEAM_SEARCH = "group_beam_search"
60
+
61
+
62
+ class GenerationConfig(PushToHubMixin):
63
+ # no-format
64
+ r"""
65
+ Class that holds a configuration for a generation task. A `generate` call supports the following generation methods
66
+ for text-decoder, text-to-text, speech-to-text, and vision-to-text models:
67
+
68
+ - *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and
69
+ `do_sample=False`
70
+ - *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0.`
71
+ and `top_k>1`
72
+ - *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and
73
+ `do_sample=True`
74
+ - *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and
75
+ `do_sample=False`
76
+ - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if
77
+ `num_beams>1` and `do_sample=True`
78
+ - *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if
79
+ `num_beams>1` and `num_beam_groups>1`
80
+ - *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if
81
+ `constraints!=None` or `force_words_ids!=None`
82
+ - *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if
83
+ `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()`
84
+
85
+ You do not need to call any of the above methods directly. Pass custom parameter values to '.generate()'. To learn
86
+ more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
87
+
88
+ <Tip>
89
+
90
+ A large number of these flags control the logits or the stopping criteria of the generation. Make sure you check
91
+ the [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full
92
+ description of the possible manipulations, as well as examples of their usage.
93
+
94
+ </Tip>
95
+
96
+ Arg:
97
+ > Parameters that control the length of the output
98
+
99
+ max_length (`int`, *optional*, defaults to 20):
100
+ The maximum length the generated tokens can have. Corresponds to the length of the input prompt +
101
+ `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set.
102
+ max_new_tokens (`int`, *optional*):
103
+ The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt.
104
+ min_length (`int`, *optional*, defaults to 0):
105
+ The minimum length of the sequence to be generated. Corresponds to the length of the input prompt +
106
+ `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set.
107
+ min_new_tokens (`int`, *optional*):
108
+ The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt.
109
+ early_stopping (`bool` or `str`, *optional*, defaults to `False`):
110
+ Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values:
111
+ `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an
112
+ heuristic is applied and the generation stops when is it very unlikely to find better candidates;
113
+ `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical
114
+ beam search algorithm).
115
+ max_time(`float`, *optional*):
116
+ The maximum amount of time you allow the computation to run for in seconds. generation will still finish
117
+ the current pass after allocated time has been passed.
118
+
119
+ > Parameters that control the generation strategy used
120
+
121
+ do_sample (`bool`, *optional*, defaults to `False`):
122
+ Whether or not to use sampling ; use greedy decoding otherwise.
123
+ num_beams (`int`, *optional*, defaults to 1):
124
+ Number of beams for beam search. 1 means no beam search.
125
+ num_beam_groups (`int`, *optional*, defaults to 1):
126
+ Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams.
127
+ [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details.
128
+ penalty_alpha (`float`, *optional*):
129
+ The values balance the model confidence and the degeneration penalty in contrastive search decoding.
130
+ use_cache (`bool`, *optional*, defaults to `True`):
131
+ Whether or not the model should use the past last key/values attentions (if applicable to the model) to
132
+ speed up decoding.
133
+
134
+ > Parameters for manipulation of the model output logits
135
+
136
+ temperature (`float`, *optional*, defaults to 1.0):
137
+ The value used to modulate the next token probabilities.
138
+ top_k (`int`, *optional*, defaults to 50):
139
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
140
+ top_p (`float`, *optional*, defaults to 1.0):
141
+ If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to
142
+ `top_p` or higher are kept for generation.
143
+ typical_p (`float`, *optional*, defaults to 1.0):
144
+ Local typicality measures how similar the conditional probability of predicting a target token next is to
145
+ the expected conditional probability of predicting a random token next, given the partial text already
146
+ generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that
147
+ add up to `typical_p` or higher are kept for generation. See [this
148
+ paper](https://arxiv.org/pdf/2202.00666.pdf) for more details.
149
+ epsilon_cutoff (`float`, *optional*, defaults to 0.0):
150
+ If set to float strictly between 0 and 1, only tokens with a conditional probability greater than
151
+ `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the
152
+ size of the model. See [Truncation Sampling as Language Model
153
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
154
+ eta_cutoff (`float`, *optional*, defaults to 0.0):
155
+ Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between
156
+ 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) *
157
+ exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token
158
+ probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3,
159
+ depending on the size of the model. See [Truncation Sampling as Language Model
160
+ Desmoothing](https://arxiv.org/abs/2210.15191) for more details.
161
+ diversity_penalty (`float`, *optional*, defaults to 0.0):
162
+ This value is subtracted from a beam's score if it generates the same token as any beam from another group at a
163
+ particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled.
164
+ repetition_penalty (`float`, *optional*, defaults to 1.0):
165
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
166
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
167
+ encoder_repetition_penalty (`float`, *optional*, defaults to 1.0):
168
+ The parameter for encoder_repetition_penalty. An exponential penalty on sequences that are not in the
169
+ original input. 1.0 means no penalty.
170
+ length_penalty (`float`, *optional*, defaults to 1.0):
171
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
172
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
173
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
174
+ `length_penalty` < 0.0 encourages shorter sequences.
175
+ no_repeat_ngram_size (`int`, *optional*, defaults to 0):
176
+ If set to int > 0, all ngrams of that size can only occur once.
177
+ bad_words_ids(`List[List[int]]`, *optional*):
178
+ List of list of token ids that are not allowed to be generated. Check
179
+ [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples.
180
+ force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*):
181
+ List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of
182
+ words that must be included, the opposite to `bad_words_ids`. If given `List[List[List[int]]]`, this
183
+ triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one
184
+ can allow different forms of each word.
185
+ renormalize_logits (`bool`, *optional*, defaults to `False`):
186
+ Whether to renormalize the logits after applying all the logits processors or warpers (including the custom
187
+ ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the score logits
188
+ are normalized, but some logit processors or warpers break the normalization.
189
+ constraints (`List[Constraint]`, *optional*):
190
+ Custom constraints that can be added to the generation to ensure that the output will contain the use of
191
+ certain tokens as defined by `Constraint` objects, in the most sensible way possible.
192
+ forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`):
193
+ The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for
194
+ multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target
195
+ language token.
196
+ forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`):
197
+ The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a
198
+ list to set multiple *end-of-sequence* tokens.
199
+ remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`):
200
+ Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method from crashing.
201
+ Note that using `remove_invalid_values` can slow down generation.
202
+ exponential_decay_length_penalty (`tuple(int, float)`, *optional*):
203
+ This tuple adds an exponentially increasing length penalty after a certain number of tokens have been
204
+ generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where
205
+ the penalty starts and `decay_factor` represents the factor of exponential decay.
206
+ suppress_tokens (`List[int]`, *optional*):
207
+ A list of tokens that will be suppressed at generation. The `SuppressTokens` logit processor will set their
208
+ log probs to `-inf` so that they are not sampled.
209
+ begin_suppress_tokens (`List[int]`, *optional*):
210
+ A list of tokens that will be suppressed at the beginning of the generation. The `SuppressBeginTokens` logit
211
+ processor will set their log probs to `-inf` so that they are not sampled.
212
+ forced_decoder_ids (`List[List[int]]`, *optional*):
213
+ A list of pairs of integers which indicates a mapping from generation indices to token indices that will be
214
+ forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token
215
+ of index 123.
216
+ sequence_bias (`Dict[Tuple[int], float]`, *optional*):
217
+ Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the
218
+ sequence being selected, while negative biases do the opposite. Check
219
+ [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples.
220
+ guidance_scale (`float`, *optional*):
221
+ The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`.
222
+ Higher guidance scale encourages the model to generate samples that are more closely linked to the input
223
+ prompt, usually at the expense of poorer quality.
224
+ low_memory (`bool`, *optional*):
225
+ Switch to sequential beam search and sequential topk for contrastive search to reduce peak memory.
226
+ Used with beam search and contrastive search.
227
+
228
+
229
+ > Parameters that define the output variables of `generate`
230
+
231
+ num_return_sequences(`int`, *optional*, defaults to 1):
232
+ The number of independently computed returned sequences for each element in the batch.
233
+ output_attentions (`bool`, *optional*, defaults to `False`):
234
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
235
+ tensors for more details.
236
+ output_hidden_states (`bool`, *optional*, defaults to `False`):
237
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
238
+ more details.
239
+ output_scores (`bool`, *optional*, defaults to `False`):
240
+ Whether or not to return the prediction scores. See `scores` under returned tensors for more details.
241
+ output_logits (`bool`, *optional*):
242
+ Whether or not to return the unprocessed prediction logit scores. See `logits` under returned tensors for
243
+ more details.
244
+ return_dict_in_generate (`bool`, *optional*, defaults to `False`):
245
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
246
+
247
+ > Special tokens that can be used at generation time
248
+
249
+ pad_token_id (`int`, *optional*):
250
+ The id of the *padding* token.
251
+ bos_token_id (`int`, *optional*):
252
+ The id of the *beginning-of-sequence* token.
253
+ eos_token_id (`Union[int, List[int]]`, *optional*):
254
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
255
+
256
+ > Generation parameters exclusive to encoder-decoder models
257
+
258
+ encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0):
259
+ If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the
260
+ `decoder_input_ids`.
261
+ decoder_start_token_id (`Union[int, List[int]]`, *optional*):
262
+ If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token or a list of length
263
+ `batch_size`. Passing a list enables different start ids for each element in the batch
264
+ (e.g. multilingual models with different target languages in one batch).
265
+
266
+
267
+ > Generation parameters exclusive to [assistant generation](https://arxiv.org/abs/2211.17192)
268
+
269
+ num_assistant_tokens (`int`, *optional*, defaults to 5):
270
+ Defines the number of _speculative tokens_ that shall be generated by the assistant model before being
271
+ checked by the target model at each iteration. Higher values for `num_assistant_tokens` make the generation
272
+ more _speculative_: if the assistant model is performant, larger speed-ups can be reached; if the assistant
273
+ model requires lots of corrections, lower speed-ups are reached.
274
+
275
+ num_assistant_tokens_schedule (`str`, *optional*, defaults to `"heuristic"`):
276
+ Defines the schedule at which max assistant tokens shall be changed during inference.
277
+ - `"heuristic"`: When all speculative tokens are correct, increase `num_assistant_tokens` by 2 else
278
+ reduce by 1. `num_assistant_tokens` value is persistent over multiple generation calls with the same assistant model.
279
+ - `"heuristic_transient"`: Same as `"heuristic"` but `num_assistant_tokens` is reset to its initial value after each generation call.
280
+ - `"constant"`: `num_assistant_tokens` stays unchanged during generation
281
+
282
+ prompt_lookup_num_tokens (`int`, *optional*, defaults to `None`):
283
+ The number of tokens to be output as candidate tokens.
284
+
285
+ max_matching_ngram_size (`int`, *optional*, defaults to `None`):
286
+ The maximum ngram size to be considered for matching in the prompt. Defaults to 2 if not provided.
287
+
288
+ > Parameters specific to the caching mechanism:
289
+
290
+ cache_implementation (`str`, *optional*, defaults to `None`):
291
+ Cache class that should be used when generating.
292
+
293
+
294
+ > Wild card
295
+
296
+ generation_kwargs:
297
+ Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not
298
+ present in `generate`'s signature will be used in the model forward pass.
299
+ """
300
+
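A minimal usage sketch of the flags documented above, assuming a causal LM checkpoint such as `openai-community/gpt2` is available (any other checkpoint works the same way):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")

# Length, strategy and logit-manipulation flags live in a single object.
generation_config = GenerationConfig(
    max_new_tokens=50,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
    repetition_penalty=1.1,
    pad_token_id=tokenizer.eos_token_id,
)

inputs = tokenizer("Generation configs control", return_tensors="pt")
outputs = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```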
301
+ def __init__(self, **kwargs):
302
+ # Parameters that control the length of the output
303
+ self.max_length = kwargs.pop("max_length", 20)
304
+ self.max_new_tokens = kwargs.pop("max_new_tokens", None)
305
+ self.min_length = kwargs.pop("min_length", 0)
306
+ self.min_new_tokens = kwargs.pop("min_new_tokens", None)
307
+ self.early_stopping = kwargs.pop("early_stopping", False)
308
+ self.max_time = kwargs.pop("max_time", None)
309
+
310
+ # Parameters that control the generation strategy used
311
+ self.do_sample = kwargs.pop("do_sample", False)
312
+ self.num_beams = kwargs.pop("num_beams", 1)
313
+ self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
314
+ self.penalty_alpha = kwargs.pop("penalty_alpha", None)
315
+ self.use_cache = kwargs.pop("use_cache", True)
316
+
317
+ # Parameters for manipulation of the model output logits
318
+ self.temperature = kwargs.pop("temperature", 1.0)
319
+ self.top_k = kwargs.pop("top_k", 50)
320
+ self.top_p = kwargs.pop("top_p", 1.0)
321
+ self.typical_p = kwargs.pop("typical_p", 1.0)
322
+ self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0)
323
+ self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0)
324
+ self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
325
+ self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
326
+ self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0)
327
+ self.length_penalty = kwargs.pop("length_penalty", 1.0)
328
+ self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
329
+ self.bad_words_ids = kwargs.pop("bad_words_ids", None)
330
+ self.force_words_ids = kwargs.pop("force_words_ids", None)
331
+ self.renormalize_logits = kwargs.pop("renormalize_logits", False)
332
+ self.constraints = kwargs.pop("constraints", None)
333
+ self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
334
+ self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
335
+ self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
336
+ self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None)
337
+ self.suppress_tokens = kwargs.pop("suppress_tokens", None)
338
+ self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None)
339
+ self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None)
340
+ self.sequence_bias = kwargs.pop("sequence_bias", None)
341
+ self.guidance_scale = kwargs.pop("guidance_scale", None)
342
+ self.low_memory = kwargs.pop("low_memory", None)
343
+
344
+ # Parameters that define the output variables of `generate`
345
+ self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
346
+ self.output_attentions = kwargs.pop("output_attentions", False)
347
+ self.output_hidden_states = kwargs.pop("output_hidden_states", False)
348
+ self.output_scores = kwargs.pop("output_scores", False)
349
+ self.output_logits = kwargs.pop("output_logits", None)
350
+ self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
351
+
352
+ # Special tokens that can be used at generation time
353
+ self.pad_token_id = kwargs.pop("pad_token_id", None)
354
+ self.bos_token_id = kwargs.pop("bos_token_id", None)
355
+ self.eos_token_id = kwargs.pop("eos_token_id", None)
356
+
357
+ # Generation parameters exclusive to encoder-decoder models
358
+ self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
359
+ self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
360
+
361
+ # Assistant generation
362
+ self.num_assistant_tokens = kwargs.pop("num_assistant_tokens", 5)
363
+ self.num_assistant_tokens_schedule = kwargs.pop("num_assistant_tokens_schedule", "heuristic")
364
+
365
+ # Cache implementation
366
+ self.cache_implementation = kwargs.pop("cache_implementation", None)
367
+
368
+ # Prompt lookup decoding
369
+ self.prompt_lookup_num_tokens = kwargs.pop("prompt_lookup_num_tokens", None)
370
+ self.max_matching_ngram_size = kwargs.pop("max_matching_ngram_size", None)
371
+
372
+ # Wild card
373
+ self.generation_kwargs = kwargs.pop("generation_kwargs", {})
374
+
375
+ # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub
376
+ # interface.
377
+ self._from_model_config = kwargs.pop("_from_model_config", False)
378
+ self._commit_hash = kwargs.pop("_commit_hash", None)
379
+ self.transformers_version = kwargs.pop("transformers_version", __version__)
380
+
381
+ # Additional attributes without default values
382
+ if not self._from_model_config:
383
+ # we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a
384
+ # model's default configuration file
385
+ for key, value in kwargs.items():
386
+ try:
387
+ setattr(self, key, value)
388
+ except AttributeError as err:
389
+ logger.error(f"Can't set {key} with value {value} for {self}")
390
+ raise err
391
+
392
+ # Validate the values of the attributes
393
+ self.validate(is_init=True)
394
+
395
+ def __hash__(self):
396
+ return hash(self.to_json_string(ignore_metadata=True))
397
+
398
+ def __eq__(self, other):
399
+ if not isinstance(other, GenerationConfig):
400
+ return False
401
+
402
+ self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True)
403
+ other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True)
404
+ return self_without_metadata == other_without_metadata
405
+
406
+ def __repr__(self):
407
+ return f"{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}"
408
+
409
+ def get_generation_mode(self, assistant_model: Optional["PreTrainedModel"] = None) -> GenerationMode:
410
+ """
411
+ Returns the generation mode triggered by the [`GenerationConfig`] instance.
412
+
413
+ Args:
414
+ assistant_model (`PreTrainedModel`, *optional*):
415
+ The assistant model to be used for assisted generation. If set, the generation mode will be
416
+ assisted generation.
417
+
418
+ Returns:
419
+ `GenerationMode`: The generation mode triggered by the instance.
420
+ """
421
+ # TODO joao: find out a way of not depending on external fields (e.g. `assistant_model`), then make this a
422
+ # property and part of the `__repr__`
423
+ if self.constraints is not None or self.force_words_ids is not None:
424
+ generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH
425
+ elif self.num_beams == 1:
426
+ if self.do_sample is False:
427
+ if (
428
+ self.top_k is not None
429
+ and self.top_k > 1
430
+ and self.penalty_alpha is not None
431
+ and self.penalty_alpha > 0
432
+ ):
433
+ generation_mode = GenerationMode.CONTRASTIVE_SEARCH
434
+ else:
435
+ generation_mode = GenerationMode.GREEDY_SEARCH
436
+ else:
437
+ generation_mode = GenerationMode.SAMPLE
438
+ else:
439
+ if self.num_beam_groups > 1:
440
+ generation_mode = GenerationMode.GROUP_BEAM_SEARCH
441
+ elif self.do_sample is True:
442
+ generation_mode = GenerationMode.BEAM_SAMPLE
443
+ else:
444
+ generation_mode = GenerationMode.BEAM_SEARCH
445
+
446
+ # Assisted generation may extend some generation modes
447
+ if assistant_model is not None or self.prompt_lookup_num_tokens is not None:
448
+ if generation_mode in ("greedy_search", "sample"):
449
+ generation_mode = GenerationMode.ASSISTED_GENERATION
450
+ else:
451
+ raise ValueError(
452
+ "You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate "
453
+ "is only supported with Greedy Search and Sample."
454
+ )
455
+ return generation_mode
456
+
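For illustration, a small sketch of how flag combinations map to modes (constructed configs only, no model needed):

```python
from transformers import GenerationConfig
from transformers.generation.configuration_utils import GenerationMode

assert GenerationConfig().get_generation_mode() == GenerationMode.GREEDY_SEARCH
assert GenerationConfig(do_sample=True).get_generation_mode() == GenerationMode.SAMPLE
assert GenerationConfig(num_beams=4).get_generation_mode() == GenerationMode.BEAM_SEARCH
assert (
    GenerationConfig(num_beams=4, num_beam_groups=2, diversity_penalty=0.5).get_generation_mode()
    == GenerationMode.GROUP_BEAM_SEARCH
)
```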
457
+ def validate(self, is_init=False):
458
+ """
459
+ Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence
460
+ of parameterization that can be detected as incorrect from the configuration instance alone.
461
+
462
+ Note that some parameters not validated here are best validated at generate runtime, as they may depend on
463
+ other inputs and/or the model, such as parameters related to the generation length.
464
+
465
+ Args:
466
+ is_init (`bool`, *optional*, defaults to `False`):
467
+ Whether the validation is performed during the initialization of the instance.
468
+ """
469
+
470
+ # Validation of individual attributes
471
+ if self.early_stopping not in {True, False, "never"}:
472
+ raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.")
473
+ if self.max_new_tokens is not None and self.max_new_tokens <= 0:
474
+ raise ValueError(f"`max_new_tokens` must be greater than 0, but is {self.max_new_tokens}.")
475
+
476
+ # Validation of attribute relations:
477
+ fix_location = ""
478
+ if is_init:
479
+ fix_location = (
480
+ " This was detected when initializing the generation config instance, which means the corresponding "
481
+ "file may hold incorrect parameterization and should be fixed."
482
+ )
483
+
484
+ # 1. detect sampling-only parameterization when not in sampling mode
485
+ if self.do_sample is False:
486
+ greedy_wrong_parameter_msg = (
487
+ "`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only "
488
+ "used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`."
489
+ + fix_location
490
+ )
491
+ if self.temperature is not None and self.temperature != 1.0:
492
+ warnings.warn(
493
+ greedy_wrong_parameter_msg.format(flag_name="temperature", flag_value=self.temperature),
494
+ UserWarning,
495
+ )
496
+ if self.top_p is not None and self.top_p != 1.0:
497
+ warnings.warn(
498
+ greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p),
499
+ UserWarning,
500
+ )
501
+ if self.typical_p is not None and self.typical_p != 1.0:
502
+ warnings.warn(
503
+ greedy_wrong_parameter_msg.format(flag_name="typical_p", flag_value=self.typical_p),
504
+ UserWarning,
505
+ )
506
+ if (
507
+ self.top_k is not None and self.top_k != 50 and self.penalty_alpha is None
508
+ ): # contrastive search uses top_k
509
+ warnings.warn(
510
+ greedy_wrong_parameter_msg.format(flag_name="top_k", flag_value=self.top_k),
511
+ UserWarning,
512
+ )
513
+ if self.epsilon_cutoff is not None and self.epsilon_cutoff != 0.0:
514
+ warnings.warn(
515
+ greedy_wrong_parameter_msg.format(flag_name="epsilon_cutoff", flag_value=self.epsilon_cutoff),
516
+ UserWarning,
517
+ )
518
+ if self.eta_cutoff is not None and self.eta_cutoff != 0.0:
519
+ warnings.warn(
520
+ greedy_wrong_parameter_msg.format(flag_name="eta_cutoff", flag_value=self.eta_cutoff),
521
+ UserWarning,
522
+ )
523
+
524
+ # 2. detect beam-only parameterization when not in beam mode
525
+ if self.num_beams is None:
526
+ warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning)
527
+ self.num_beams = 1
528
+
529
+ if self.num_beams == 1:
530
+ single_beam_wrong_parameter_msg = (
531
+ "`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used "
532
+ "in beam-based generation modes. You should set `num_beams>1` or unset `{flag_name}`." + fix_location
533
+ )
534
+ if self.early_stopping is not False:
535
+ warnings.warn(
536
+ single_beam_wrong_parameter_msg.format(flag_name="early_stopping", flag_value=self.early_stopping),
537
+ UserWarning,
538
+ )
539
+ if self.num_beam_groups is not None and self.num_beam_groups != 1:
540
+ warnings.warn(
541
+ single_beam_wrong_parameter_msg.format(
542
+ flag_name="num_beam_groups", flag_value=self.num_beam_groups
543
+ ),
544
+ UserWarning,
545
+ )
546
+ if self.diversity_penalty is not None and self.diversity_penalty != 0.0:
547
+ warnings.warn(
548
+ single_beam_wrong_parameter_msg.format(
549
+ flag_name="diversity_penalty", flag_value=self.diversity_penalty
550
+ ),
551
+ UserWarning,
552
+ )
553
+ if self.length_penalty is not None and self.length_penalty != 1.0:
554
+ warnings.warn(
555
+ single_beam_wrong_parameter_msg.format(flag_name="length_penalty", flag_value=self.length_penalty),
556
+ UserWarning,
557
+ )
558
+ if self.constraints is not None:
559
+ warnings.warn(
560
+ single_beam_wrong_parameter_msg.format(flag_name="constraints", flag_value=self.constraints),
561
+ UserWarning,
562
+ )
563
+
564
+ # 3. detect incorrect parameterization specific to advanced beam modes
565
+ else:
566
+ # constrained beam search
567
+ if self.constraints is not None or self.force_words_ids is not None:
568
+ constrained_wrong_parameter_msg = (
569
+ "one of `constraints`, `force_words_ids` is not `None`, triggering constrained beam search. However, "
570
+ "`{flag_name}` is set to `{flag_value}`, which is incompatible with this generation mode. Set "
571
+ "`constraints` and `force_words_ids` to `None` or unset `{flag_name}` to continue." + fix_location
572
+ )
573
+ if self.do_sample is True:
574
+ raise ValueError(
575
+ constrained_wrong_parameter_msg.format(flag_name="do_sample", flag_value=self.do_sample)
576
+ )
577
+ if self.num_beam_groups is not None and self.num_beam_groups != 1:
578
+ raise ValueError(
579
+ constrained_wrong_parameter_msg.format(
580
+ flag_name="num_beam_groups", flag_value=self.num_beam_groups
581
+ )
582
+ )
583
+ # group beam search
584
+ if self.diversity_penalty != 0.0 or self.num_beam_groups != 1:
585
+ group_error_prefix = (
586
+ "`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. In "
587
+ "this generation mode, "
588
+ )
589
+ if self.do_sample is True:
590
+ raise ValueError(group_error_prefix + "`do_sample` must be set to `False`")
591
+ if self.num_beams % self.num_beam_groups != 0:
592
+ raise ValueError(group_error_prefix + "`num_beams` should be divisible by `num_beam_groups`")
593
+ if self.diversity_penalty == 0.0:
594
+ raise ValueError(
595
+ group_error_prefix
596
+ + "`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical."
597
+ )
598
+
599
+ # 4. check `num_return_sequences`
600
+ if self.num_return_sequences != 1:
601
+ if self.num_beams == 1:
602
+ if self.do_sample is False:
603
+ raise ValueError(
604
+ "Greedy methods without beam search do not support `num_return_sequences` different than 1 "
605
+ f"(got {self.num_return_sequences})."
606
+ )
607
+ elif self.num_return_sequences > self.num_beams:
608
+ raise ValueError(
609
+ f"`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` "
610
+ f"({self.num_beams})."
611
+ )
612
+
613
+ # 5. check common issue: passing `generate` arguments inside the generation config
614
+ generate_arguments = (
615
+ "logits_processor",
616
+ "stopping_criteria",
617
+ "prefix_allowed_tokens_fn",
618
+ "synced_gpus",
619
+ "assistant_model",
620
+ "streamer",
621
+ "negative_prompt_ids",
622
+ "negative_prompt_attention_mask",
623
+ )
624
+ for arg in generate_arguments:
625
+ if hasattr(self, arg):
626
+ raise ValueError(
627
+ f"Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to "
628
+ "`generate()` (or a pipeline) directly."
629
+ )
630
+
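A couple of the checks above in action (the exact warning and error wording may differ between versions):

```python
import warnings

from transformers import GenerationConfig

# A sampling-only flag combined with `do_sample=False` triggers a `UserWarning` at init time.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    GenerationConfig(do_sample=False, temperature=0.6)
assert any("temperature" in str(w.message) for w in caught)

# An incompatible combination raises immediately, via `validate(is_init=True)`.
try:
    GenerationConfig(num_beams=2, num_beam_groups=2, diversity_penalty=0.5, do_sample=True)
except ValueError as err:
    print(err)
```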
631
+ def save_pretrained(
632
+ self,
633
+ save_directory: Union[str, os.PathLike],
634
+ config_file_name: Optional[Union[str, os.PathLike]] = None,
635
+ push_to_hub: bool = False,
636
+ **kwargs,
637
+ ):
638
+ r"""
639
+ Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the
640
+ [`~GenerationConfig.from_pretrained`] class method.
641
+
642
+ Args:
643
+ save_directory (`str` or `os.PathLike`):
644
+ Directory where the configuration JSON file will be saved (will be created if it does not exist).
645
+ config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
646
+ Name of the generation configuration JSON file to be saved in `save_directory`.
647
+ push_to_hub (`bool`, *optional*, defaults to `False`):
648
+ Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
649
+ repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
650
+ namespace).
651
+ kwargs (`Dict[str, Any]`, *optional*):
652
+ Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
653
+ """
654
+
655
+ # At save time, validate the instance -- if any warning/exception is thrown, we refuse to save the instance.
656
+ # This strictness is enforced to prevent bad configurations from being saved and re-used.
657
+ try:
658
+ with warnings.catch_warnings(record=True) as caught_warnings:
659
+ self.validate()
660
+ if len(caught_warnings) > 0:
661
+ raise ValueError(str([w.message for w in caught_warnings]))
662
+ except ValueError as exc:
663
+ raise ValueError(
664
+ "The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. "
665
+ "Fix these issues to save the configuration.\n\nThrown during validation:\n" + str(exc)
666
+ )
667
+
668
+ use_auth_token = kwargs.pop("use_auth_token", None)
669
+
670
+ if use_auth_token is not None:
671
+ warnings.warn(
672
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
673
+ FutureWarning,
674
+ )
675
+ if kwargs.get("token", None) is not None:
676
+ raise ValueError(
677
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
678
+ )
679
+ kwargs["token"] = use_auth_token
680
+
681
+ config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
682
+
683
+ if os.path.isfile(save_directory):
684
+ raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
685
+
686
+ os.makedirs(save_directory, exist_ok=True)
687
+
688
+ if push_to_hub:
689
+ commit_message = kwargs.pop("commit_message", None)
690
+ repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
691
+ repo_id = self._create_repo(repo_id, **kwargs)
692
+ files_timestamps = self._get_files_timestamps(save_directory)
693
+
694
+ output_config_file = os.path.join(save_directory, config_file_name)
695
+
696
+ self.to_json_file(output_config_file, use_diff=True)
697
+ logger.info(f"Configuration saved in {output_config_file}")
698
+
699
+ if push_to_hub:
700
+ self._upload_modified_files(
701
+ save_directory,
702
+ repo_id,
703
+ files_timestamps,
704
+ commit_message=commit_message,
705
+ token=kwargs.get("token"),
706
+ )
707
+
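A round-trip sketch (note that a config which triggers validation warnings is refused at save time, as enforced above):

```python
import tempfile

from transformers import GenerationConfig

generation_config = GenerationConfig(max_new_tokens=64, do_sample=True, top_p=0.9)
with tempfile.TemporaryDirectory() as tmp_dir:
    # Writes `generation_config.json`, containing only the non-default values (`use_diff=True`).
    generation_config.save_pretrained(tmp_dir)
    reloaded = GenerationConfig.from_pretrained(tmp_dir)
    assert reloaded == generation_config
```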
708
+ @classmethod
709
+ def from_pretrained(
710
+ cls,
711
+ pretrained_model_name: Union[str, os.PathLike],
712
+ config_file_name: Optional[Union[str, os.PathLike]] = None,
713
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
714
+ force_download: bool = False,
715
+ local_files_only: bool = False,
716
+ token: Optional[Union[str, bool]] = None,
717
+ revision: str = "main",
718
+ **kwargs,
719
+ ) -> "GenerationConfig":
720
+ r"""
721
+ Instantiate a [`GenerationConfig`] from a generation configuration file.
722
+
723
+ Args:
724
+ pretrained_model_name (`str` or `os.PathLike`):
725
+ This can be either:
726
+
727
+ - a string, the *model id* of a pretrained model configuration hosted inside a model repo on
728
+ huggingface.co.
729
+ - a path to a *directory* containing a configuration file saved using the
730
+ [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`.
731
+ config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`):
732
+ Name of the generation configuration JSON file to be loaded from `pretrained_model_name`.
733
+ cache_dir (`str` or `os.PathLike`, *optional*):
734
+ Path to a directory in which a downloaded pretrained model configuration should be cached if the
735
+ standard cache should not be used.
736
+ force_download (`bool`, *optional*, defaults to `False`):
737
+ Whether or not to force to (re-)download the configuration files and override the cached versions if
738
+ they exist.
739
+ resume_download (`bool`, *optional*, defaults to `False`):
740
+ Whether or not to delete incompletely received files. Attempts to resume the download if such a file
741
+ exists.
742
+ proxies (`Dict[str, str]`, *optional*):
743
+ A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
744
+ 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
745
+ token (`str` or `bool`, *optional*):
746
+ The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
747
+ the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
748
+ revision (`str`, *optional*, defaults to `"main"`):
749
+ The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
750
+ git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
751
+ identifier allowed by git.
752
+
753
+ <Tip>
754
+
755
+ To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
756
+
757
+ </Tip>
758
+
759
+ return_unused_kwargs (`bool`, *optional*, defaults to `False`):
760
+ If `False`, then this function returns just the final configuration object.
761
+
762
+ If `True`, then this function returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a
763
+ dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the
764
+ part of `kwargs` which has not been used to update `config` and is otherwise ignored.
765
+ subfolder (`str`, *optional*, defaults to `""`):
766
+ In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
767
+ specify the folder name here.
768
+ kwargs (`Dict[str, Any]`, *optional*):
769
+ The values in kwargs of any keys which are configuration attributes will be used to override the loaded
770
+ values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled
771
+ by the `return_unused_kwargs` keyword parameter.
772
+
773
+ Returns:
774
+ [`GenerationConfig`]: The configuration object instantiated from this pretrained model.
775
+
776
+ Examples:
777
+
778
+ ```python
779
+ >>> from transformers import GenerationConfig
780
+
781
+ >>> # Download configuration from huggingface.co and cache.
782
+ >>> generation_config = GenerationConfig.from_pretrained("openai-community/gpt2")
783
+
784
+ >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')*
785
+ >>> generation_config.save_pretrained("./test/saved_model/")
786
+ >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/")
787
+
788
+ >>> # You can also specify configuration names to your generation configuration file
789
+ >>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json")
790
+ >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json")
791
+
792
+ >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation
793
+ >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored
794
+ >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained(
795
+ ... "openai-community/gpt2", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True
796
+ ... )
797
+ >>> generation_config.top_k
798
+ 1
799
+
800
+ >>> unused_kwargs
801
+ {'foo': False}
802
+ ```"""
803
+ config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME
804
+
805
+ resume_download = kwargs.pop("resume_download", False)
806
+ proxies = kwargs.pop("proxies", None)
807
+ use_auth_token = kwargs.pop("use_auth_token", None)
808
+ subfolder = kwargs.pop("subfolder", "")
809
+ from_pipeline = kwargs.pop("_from_pipeline", None)
810
+ from_auto_class = kwargs.pop("_from_auto", False)
811
+ commit_hash = kwargs.pop("_commit_hash", None)
812
+
813
+ if use_auth_token is not None:
814
+ warnings.warn(
815
+ "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.",
816
+ FutureWarning,
817
+ )
818
+ if token is not None:
819
+ raise ValueError(
820
+ "`token` and `use_auth_token` are both specified. Please set only the argument `token`."
821
+ )
822
+ token = use_auth_token
823
+
824
+ user_agent = {"file_type": "config", "from_auto_class": from_auto_class}
825
+ if from_pipeline is not None:
826
+ user_agent["using_pipeline"] = from_pipeline
827
+
828
+ config_path = os.path.join(pretrained_model_name, config_file_name)
829
+ config_path = str(config_path)
830
+
831
+ is_local = os.path.exists(config_path)
832
+ if os.path.isfile(os.path.join(subfolder, config_path)):
833
+ # Special case when config_path is a local file
834
+ resolved_config_file = config_path
835
+ is_local = True
836
+ elif is_remote_url(config_path):
837
+ configuration_file = config_path
838
+ resolved_config_file = download_url(config_path)
839
+ else:
840
+ configuration_file = config_file_name
841
+ try:
842
+ # Load from local folder or from cache or download from model Hub and cache
843
+ resolved_config_file = cached_file(
844
+ pretrained_model_name,
845
+ configuration_file,
846
+ cache_dir=cache_dir,
847
+ force_download=force_download,
848
+ proxies=proxies,
849
+ resume_download=resume_download,
850
+ local_files_only=local_files_only,
851
+ token=token,
852
+ user_agent=user_agent,
853
+ revision=revision,
854
+ subfolder=subfolder,
855
+ _commit_hash=commit_hash,
856
+ )
857
+ commit_hash = extract_commit_hash(resolved_config_file, commit_hash)
858
+ except EnvironmentError:
859
+ # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
860
+ # the original exception.
861
+ raise
862
+ except Exception:
863
+ # For any other exception, we throw a generic error.
864
+ raise EnvironmentError(
865
+ f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it"
866
+ " from 'https://huggingface.co/models', make sure you don't have a local directory with the same"
867
+ f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory"
868
+ f" containing a {configuration_file} file"
869
+ )
870
+
871
+ try:
872
+ # Load config dict
873
+ config_dict = cls._dict_from_json_file(resolved_config_file)
874
+ config_dict["_commit_hash"] = commit_hash
875
+ except (json.JSONDecodeError, UnicodeDecodeError):
876
+ raise EnvironmentError(
877
+ f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file."
878
+ )
879
+
880
+ if is_local:
881
+ logger.info(f"loading configuration file {resolved_config_file}")
882
+ else:
883
+ logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}")
884
+
885
+ if kwargs.get("return_unused_kwargs") is True:
886
+ config, unused_kwargs = cls.from_dict(config_dict, **kwargs)
887
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
888
+ return config, unused_kwargs
889
+ else:
890
+ config = cls.from_dict(config_dict, **kwargs)
891
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
892
+ return config
893
+
894
+ @classmethod
895
+ def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
896
+ with open(json_file, "r", encoding="utf-8") as reader:
897
+ text = reader.read()
898
+ return json.loads(text)
899
+
900
+ @classmethod
901
+ def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig":
902
+ """
903
+ Instantiates a [`GenerationConfig`] from a Python dictionary of parameters.
904
+
905
+ Args:
906
+ config_dict (`Dict[str, Any]`):
907
+ Dictionary that will be used to instantiate the configuration object.
908
+ kwargs (`Dict[str, Any]`):
909
+ Additional parameters from which to initialize the configuration object.
910
+
911
+ Returns:
912
+ [`GenerationConfig`]: The configuration object instantiated from those parameters.
913
+ """
914
+ return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
915
+ # Those arguments may be passed along for our internal telemetry.
916
+ # We remove them so they don't appear in `return_unused_kwargs`.
917
+ kwargs.pop("_from_auto", None)
918
+ kwargs.pop("_from_pipeline", None)
919
+ # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update.
920
+ if "_commit_hash" in kwargs and "_commit_hash" in config_dict:
921
+ kwargs["_commit_hash"] = config_dict["_commit_hash"]
922
+
923
+ # The line below allows model-specific config to be loaded as well through kwargs, with safety checks.
924
+ # See https://github.com/huggingface/transformers/pull/21269
925
+ config = cls(**{**config_dict, **kwargs})
926
+ unused_kwargs = config.update(**kwargs)
927
+
928
+ logger.info(f"Generate config {config}")
929
+ if return_unused_kwargs:
930
+ return config, unused_kwargs
931
+ else:
932
+ return config
933
+
934
+ def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None:
935
+ """
936
+ Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None,
937
+ converts torch.dtype to a string of just the type. For example, `torch.float32` gets converted into the *"float32"*
938
+ string, which can then be stored in the json format.
939
+ """
940
+ if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str):
941
+ d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1]
942
+ for value in d.values():
943
+ if isinstance(value, dict):
944
+ self.dict_torch_dtype_to_str(value)
945
+
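A small sketch of the conversion (assumes PyTorch is installed):

```python
import torch

from transformers import GenerationConfig

d = {"torch_dtype": torch.float32, "nested": {"torch_dtype": torch.float16}}
GenerationConfig().dict_torch_dtype_to_str(d)
print(d)  # {'torch_dtype': 'float32', 'nested': {'torch_dtype': 'float16'}}
```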
946
+ def to_diff_dict(self) -> Dict[str, Any]:
947
+ """
948
+ Removes all attributes from config which correspond to the default config attributes for better readability and
949
+ serializes to a Python dictionary.
950
+
951
+ Returns:
952
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
953
+ """
954
+ config_dict = self.to_dict()
955
+
956
+ # get the default config dict
957
+ default_config_dict = GenerationConfig().to_dict()
958
+
959
+ serializable_config_dict = {}
960
+
961
+ # only serialize values that differ from the default config
962
+ for key, value in config_dict.items():
963
+ if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]:
964
+ serializable_config_dict[key] = value
965
+
966
+ self.dict_torch_dtype_to_str(serializable_config_dict)
967
+ return serializable_config_dict
968
+
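For example, only explicitly changed values (plus the library version) survive the diff:

```python
from transformers import GenerationConfig

generation_config = GenerationConfig(do_sample=True, top_p=0.9)
print(generation_config.to_diff_dict())
# e.g. {'do_sample': True, 'top_p': 0.9, 'transformers_version': '...'}
```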
969
+ def to_dict(self) -> Dict[str, Any]:
970
+ """
971
+ Serializes this instance to a Python dictionary.
972
+
973
+ Returns:
974
+ `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
975
+ """
976
+ output = copy.deepcopy(self.__dict__)
977
+
978
+ # Fields to ignore at serialization time
979
+ if "_commit_hash" in output:
980
+ del output["_commit_hash"]
981
+ if "_original_object_hash" in output:
982
+ del output["_original_object_hash"]
983
+
984
+ # Transformers version when serializing this file
985
+ output["transformers_version"] = __version__
986
+
987
+ self.dict_torch_dtype_to_str(output)
988
+ return output
989
+
990
+ def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str:
991
+ """
992
+ Serializes this instance to a JSON string.
993
+
994
+ Args:
995
+ use_diff (`bool`, *optional*, defaults to `True`):
996
+ If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
997
+ is serialized to JSON string.
998
+ ignore_metadata (`bool`, *optional*, defaults to `False`):
999
+ Whether to ignore the metadata fields present in the instance
1000
+
1001
+ Returns:
1002
+ `str`: String containing all the attributes that make up this configuration instance in JSON format.
1003
+ """
1004
+ if use_diff is True:
1005
+ config_dict = self.to_diff_dict()
1006
+ else:
1007
+ config_dict = self.to_dict()
1008
+
1009
+ if ignore_metadata:
1010
+ for metadata_field in METADATA_FIELDS:
1011
+ config_dict.pop(metadata_field, None)
1012
+
1013
+ def convert_keys_to_string(obj):
1014
+ if isinstance(obj, dict):
1015
+ return {str(key): convert_keys_to_string(value) for key, value in obj.items()}
1016
+ elif isinstance(obj, list):
1017
+ return [convert_keys_to_string(item) for item in obj]
1018
+ else:
1019
+ return obj
1020
+
1021
+ config_dict = convert_keys_to_string(config_dict)
1022
+
1023
+ return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
1024
+
1025
+ def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
1026
+ """
1027
+ Save this instance to a JSON file.
1028
+
1029
+ Args:
1030
+ json_file_path (`str` or `os.PathLike`):
1031
+ Path to the JSON file in which this configuration instance's parameters will be saved.
1032
+ use_diff (`bool`, *optional*, defaults to `True`):
1033
+ If set to `True`, only the difference between the config instance and the default `GenerationConfig()`
1034
+ is serialized to JSON file.
1035
+ """
1036
+ with open(json_file_path, "w", encoding="utf-8") as writer:
1037
+ writer.write(self.to_json_string(use_diff=use_diff))
1038
+
1039
+ @classmethod
1040
+ def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig":
1041
+ """
1042
+ Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy
1043
+ [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`].
1044
+
1045
+ Args:
1046
+ model_config (`PretrainedConfig`):
1047
+ The model config that will be used to instantiate the generation config.
1048
+
1049
+ Returns:
1050
+ [`GenerationConfig`]: The configuration object instantiated from those parameters.
1051
+ """
1052
+ config_dict = model_config.to_dict()
1053
+ config_dict.pop("_from_model_config", None)
1054
+ config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True)
1055
+
1056
+ # Special case: some models have generation attributes set in the decoder. Use them if still unset in the
1057
+ # generation config.
1058
+ for decoder_name in ("decoder", "generator", "text_config"):
1059
+ if decoder_name in config_dict:
1060
+ default_generation_config = GenerationConfig()
1061
+ decoder_config = config_dict[decoder_name]
1062
+ for attr in config.to_dict().keys():
1063
+ if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr):
1064
+ setattr(config, attr, decoder_config[attr])
1065
+
1066
+ config._original_object_hash = hash(config) # Hash to detect whether the instance was modified
1067
+ return config
1068
+
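A short sketch of the legacy conversion, assuming the `openai-community/gpt2` checkpoint (whose model config still carries generation defaults such as the special token ids):

```python
from transformers import AutoConfig, GenerationConfig

model_config = AutoConfig.from_pretrained("openai-community/gpt2")
generation_config = GenerationConfig.from_model_config(model_config)
print(generation_config.bos_token_id, generation_config.eos_token_id)
```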
1069
+ def update(self, **kwargs):
1070
+ """
1071
+ Updates attributes of this class instance with attributes from `kwargs` if they match existing attributes,
1072
+ returning all the unused kwargs.
1073
+
1074
+ Args:
1075
+ kwargs (`Dict[str, Any]`):
1076
+ Dictionary of attributes to tentatively update this class.
1077
+
1078
+ Returns:
1079
+ `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance.
1080
+ """
1081
+ to_remove = []
1082
+ for key, value in kwargs.items():
1083
+ if hasattr(self, key):
1084
+ setattr(self, key, value)
1085
+ to_remove.append(key)
1086
+
1087
+ # Confirm that the updated instance is still valid
1088
+ self.validate()
1089
+
1090
+ # Remove all the attributes that were updated, without modifying the input dict
1091
+ unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove}
1092
+ return unused_kwargs
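For example, matching keys are applied in place and everything else is handed back (the extra key below is a made-up placeholder):

```python
from transformers import GenerationConfig

generation_config = GenerationConfig()
unused = generation_config.update(max_new_tokens=32, not_a_generation_flag=True)
print(generation_config.max_new_tokens)  # 32
print(unused)  # {'not_a_generation_flag': True}
```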
llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py ADDED
@@ -0,0 +1,544 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+
18
+ import jax
19
+ import jax.lax as lax
20
+ import jax.numpy as jnp
21
+ from jax.experimental import sparse
22
+
23
+ from ..utils import add_start_docstrings
24
+ from ..utils.logging import get_logger
25
+
26
+
27
+ logger = get_logger(__name__)
28
+
29
+
30
+ LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
31
+ Args:
32
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
33
+ Indices of input sequence tokens in the vocabulary.
34
+
35
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
36
+ [`PreTrainedTokenizer.__call__`] for details.
37
+
38
+ [What are input IDs?](../glossary#input-ids)
39
+ scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):
40
+ Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam
41
+ search or log softmax for each vocabulary token when using beam search
42
+ kwargs (`Dict[str, Any]`, *optional*):
43
+ Additional logits processor specific kwargs.
44
+
45
+ Return:
46
+ `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
47
+
48
+ """
49
+
50
+
51
+ class FlaxLogitsProcessor:
52
+ """Abstract base class for all logit processors that can be applied during generation."""
53
+
54
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
55
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
56
+ """Flax method for processing logits."""
57
+ raise NotImplementedError(
58
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
59
+ )
60
+
61
+
62
+ class FlaxLogitsWarper:
63
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
64
+
65
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
66
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray:
67
+ """Flax method for warping logits."""
68
+ raise NotImplementedError(
69
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
70
+ )
71
+
72
+
73
+ class FlaxLogitsProcessorList(list):
74
+ """
75
+ This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process
76
+ a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each
77
+ [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs.
78
+ """
79
+
80
+ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
81
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray:
82
+ for processor in self:
83
+ function_args = inspect.signature(processor.__call__).parameters
84
+ if len(function_args) > 3:
85
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
86
+ raise ValueError(
87
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
88
+ f"{processor.__class__} are passed to the logits processor."
89
+ )
90
+ scores = processor(input_ids, scores, cur_len, **kwargs)
91
+ else:
92
+ scores = processor(input_ids, scores, cur_len)
93
+ return scores
94
+
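A minimal chaining sketch (assumes JAX is installed; the scores are illustrative):

```python
import jax.numpy as jnp

from transformers.generation.flax_logits_process import (
    FlaxLogitsProcessorList,
    FlaxTemperatureLogitsWarper,
    FlaxTopKLogitsWarper,
)

processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=2)])
scores = jnp.array([[0.1, 0.2, 5.0, 4.0]])
# Each member is applied in order with the same (input_ids, scores, cur_len) call.
print(processors(input_ids=None, scores=scores, cur_len=1))
```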
95
+
96
+ class FlaxTemperatureLogitsWarper(FlaxLogitsWarper):
97
+ r"""
98
+ [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution).
99
+
100
+ Args:
101
+ temperature (`float`):
102
+ The value used to module the logits distribution.
103
+ """
104
+
105
+ def __init__(self, temperature: float):
106
+ if not isinstance(temperature, float) or not (temperature > 0):
107
+ raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
108
+
109
+ self.temperature = temperature
110
+
111
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
112
+ scores = scores / self.temperature
113
+ return scores
114
+
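For instance, a temperature below 1 sharpens the distribution by scaling the raw logits (illustrative values, JAX required):

```python
import jax.numpy as jnp

from transformers.generation.flax_logits_process import FlaxTemperatureLogitsWarper

warper = FlaxTemperatureLogitsWarper(temperature=0.5)
scores = jnp.array([[1.0, 2.0, 3.0, 4.0]])
print(warper(input_ids=None, scores=scores, cur_len=1))  # [[2. 4. 6. 8.]]
```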
115
+
116
+ class FlaxTopPLogitsWarper(FlaxLogitsWarper):
117
+ """
118
+ [`FlaxLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off.
119
+
120
+ Args:
121
+ top_p (`float`):
122
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
123
+ higher are kept for generation.
124
+ filter_value (`float`, *optional*, defaults to -inf):
125
+ All filtered values will be set to this float value.
126
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
127
+ Minimum number of tokens that cannot be filtered.
128
+ """
129
+
130
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
131
+ if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
132
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
133
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
134
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
135
+
136
+ self.top_p = top_p
137
+ self.filter_value = filter_value
138
+ self.min_tokens_to_keep = min_tokens_to_keep
139
+
140
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
141
+ topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
142
+
143
+ mask_scores = jnp.full_like(scores, self.filter_value)
144
+ cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
145
+ score_mask = cumulative_probs < self.top_p
146
+
147
+ # include the token that is higher than top_p as well
148
+ score_mask = jnp.roll(score_mask, 1)
149
+ score_mask |= score_mask.at[:, 0].set(True)
150
+
151
+ # min tokens to keep
152
+ score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True)
153
+
154
+ topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
155
+ next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1]
156
+
157
+ return next_scores
158
+
159
+
160
+ class FlaxTopKLogitsWarper(FlaxLogitsWarper):
161
+ r"""
162
+ [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
163
+
164
+ Args:
165
+ top_k (`int`):
166
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
167
+ filter_value (`float`, *optional*, defaults to -inf):
168
+ All filtered values will be set to this float value.
169
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
170
+ Minimum number of tokens that cannot be filtered.
171
+ """
172
+
173
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
174
+ if not isinstance(top_k, int) or top_k <= 0:
175
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
176
+
177
+ self.top_k = max(top_k, min_tokens_to_keep)
178
+ self.filter_value = filter_value
179
+
180
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
181
+ batch_size, vocab_size = scores.shape
182
+ next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value)
183
+
184
+ topk = min(self.top_k, scores.shape[-1]) # Safety check
185
+ topk_scores, topk_indices = lax.top_k(scores, topk)
186
+ shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten()
187
+ topk_scores_flat = topk_scores.flatten()
188
+ topk_indices_flat = topk_indices.flatten() + shift
189
+
190
+ next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat)
191
+ next_scores = next_scores_flat.reshape(batch_size, vocab_size)
192
+ return next_scores
193
+
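For instance, everything outside the `top_k` highest-scoring tokens is pushed to the filter value (illustrative scores):

```python
import jax.numpy as jnp

from transformers.generation.flax_logits_process import FlaxTopKLogitsWarper

warper = FlaxTopKLogitsWarper(top_k=2)
scores = jnp.array([[0.1, 0.2, 5.0, 4.0]])
print(warper(input_ids=None, scores=scores, cur_len=1))  # [[-inf -inf 5. 4.]]
```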
194
+
195
+ class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor):
196
+ r"""
197
+ [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token.
198
+
199
+ Args:
200
+ bos_token_id (`int`):
201
+ The id of the token to force as the first generated token.
202
+ """
203
+
204
+ def __init__(self, bos_token_id: int):
205
+ self.bos_token_id = bos_token_id
206
+
207
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
208
+ new_scores = jnp.full(scores.shape, -float("inf"))
209
+
210
+ apply_penalty = 1 - jnp.bool_(cur_len - 1)
211
+
212
+ scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores)
213
+
214
+ return scores
215
+
216
+
217
+ class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor):
218
+ r"""
219
+ [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
220
+
221
+ Args:
222
+ max_length (`int`):
223
+ The maximum length of the sequence to be generated.
224
+ eos_token_id (`int`):
225
+ The id of the token to force as the last generated token when `max_length` is reached.
226
+ """
227
+
228
+ def __init__(self, max_length: int, eos_token_id: int):
229
+ self.max_length = max_length
230
+ self.eos_token_id = eos_token_id
231
+
232
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
233
+ new_scores = jnp.full(scores.shape, -float("inf"))
234
+
235
+ apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1)
236
+
237
+ scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores)
238
+
239
+ return scores
240
+
241
+
242
+ class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor):
243
+ r"""
244
+ [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
245
+
246
+ Args:
247
+ min_length (`int`):
248
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
249
+ eos_token_id (`int`):
250
+ The id of the *end-of-sequence* token.
251
+ """
252
+
253
+ def __init__(self, min_length: int, eos_token_id: int):
254
+ if not isinstance(min_length, int) or min_length < 0:
255
+ raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
256
+
257
+ if not isinstance(eos_token_id, int) or eos_token_id < 0:
258
+ raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
259
+
260
+ self.min_length = min_length
261
+ self.eos_token_id = eos_token_id
262
+
263
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
264
+ # create boolean flag to decide if min length penalty should be applied
265
+ apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1)
266
+
267
+ scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores)
268
+
269
+ return scores
270
+
271
+
272
+ class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor):
273
+ r"""
274
 + [`FlaxLogitsProcessor`] suppressing a list of tokens as soon as the `generate` function starts generating using
275
+ `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the
276
 + beginning of the generation.
277
+
278
+ Args:
279
+ begin_suppress_tokens (`List[int]`):
280
+ Tokens to not sample.
281
+ begin_index (`int`):
282
+ Index where the tokens are suppressed.
283
+ """
284
+
285
+ def __init__(self, begin_suppress_tokens, begin_index):
286
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
287
+ self.begin_index = begin_index
288
+
289
+ def __call__(self, input_ids, scores, cur_len: int):
290
+ apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index)
291
+
292
+ scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores)
293
+
294
+ return scores
295
+
296
+
297
+ class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor):
298
+ r"""
299
+ [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs
300
+ to be `-inf` so they are not sampled.
301
+
302
+ Args:
303
+ suppress_tokens (`list`):
304
+ Tokens to not sample.
305
+ """
306
+
307
+ def __init__(self, suppress_tokens: list):
308
+ self.suppress_tokens = list(suppress_tokens)
309
+
310
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
311
+ scores = scores.at[..., self.suppress_tokens].set(-float("inf"))
312
+
313
+ return scores
314
+
315
+
316
+ class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor):
317
+ r"""
318
+ [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to
319
+ token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens
320
+ to `-inf` so that they are sampled at their corresponding index.
321
+
322
+ Args:
323
+ force_token_map (`list`):
324
+ Map giving token ids and indices where they will be forced to be sampled.
325
+ """
326
+
327
+ def __init__(self, force_token_map):
328
+ force_token_map = dict(force_token_map)
329
+ # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
330
+ # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
331
+ # Indexes without forced tokens will have a negative value.
332
+ force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1
333
+ for index, token in force_token_map.items():
334
+ if token is not None:
335
+ force_token_array = force_token_array.at[index].set(token)
336
+ self.force_token_array = jnp.int32(force_token_array)
337
+
338
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
339
+ def _force_token(generation_idx):
340
+ batch_size = scores.shape[0]
341
+ current_token = self.force_token_array[generation_idx]
342
+
343
+ new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf")
344
+ updates = jnp.zeros((batch_size, 1), dtype=scores.dtype)
345
+ new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token))
346
+ return new_scores
347
+
348
+ scores = lax.cond(
349
+ cur_len >= self.force_token_array.shape[0],
350
 + # If the current length is greater than or equal to the length of force_token_array, the processor does nothing.
351
+ lambda: scores,
352
+ # Otherwise, it may force a certain token.
353
+ lambda: lax.cond(
354
+ self.force_token_array[cur_len] >= 0,
355
 + # Only valid (non-negative) tokens are forced
356
+ lambda: _force_token(cur_len),
357
+ # Otherwise, the processor does nothing.
358
+ lambda: scores,
359
+ ),
360
+ )
361
+ return scores
362
+
363
+
364
+ class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor):
365
+ r"""
366
+ Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log
367
+ probs to `inf` so that they are sampled at their corresponding index.
368
+
369
+ Args:
370
+ generate_config (`GenerateConfig`):
371
+ The generate config used to generate the output. The following parameters are required:
372
+ eos_token_id (`int`, *optional*, defaults to 50257):
373
+ The id of the *end-of-sequence* token.
374
+ no_timestamps_token_id (`int`, *optional*, defaults to 50363):
375
+ The id of the `"<|notimestamps|>"` token.
376
+ max_initial_timestamp_index (`int`, *optional*, defaults to 1):
377
+ Used to set the maximum value of the initial timestamp. This is used to prevent the model from
378
+ predicting timestamps that are too far in the future.
379
+ """
380
+
381
+ def __init__(self, generate_config, model_config, decoder_input_length):
382
+ self.eos_token_id = generate_config.eos_token_id
383
+ self.no_timestamps_token_id = generate_config.no_timestamps_token_id
384
+ self.timestamp_begin = generate_config.no_timestamps_token_id + 1
385
+
386
+ self.begin_index = decoder_input_length + 1
387
+
388
+ if generate_config.is_multilingual:
389
+ # room for language token and task token
390
+ self.begin_index += 2
391
+ if hasattr(generate_config, "max_initial_timestamp_index"):
392
+ self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
393
+ else:
394
+ self.max_initial_timestamp_index = model_config.vocab_size
395
+ if self.max_initial_timestamp_index is None:
396
+ self.max_initial_timestamp_index = model_config.vocab_size
397
+
398
+ def __call__(self, input_ids, scores, cur_len):
399
+ # suppress <|notimestamps|> which is handled by without_timestamps
400
+ scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf"))
401
+
402
+ def handle_pairs(input_ids_k, scores_k):
403
+ last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False)
404
+ last_was_timestamp = jnp.where(
405
+ input_ids_k[cur_len - 1] >= self.timestamp_begin,
406
+ True and last_was_timestamp,
407
+ False,
408
+ )
409
+
410
+ penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False)
411
+ penultimate_was_timestamp = jnp.where(
412
+ input_ids_k[cur_len - 2] >= self.timestamp_begin,
413
+ True,
414
+ penultimate_was_timestamp,
415
+ )
416
+
417
+ return jnp.where(
418
+ last_was_timestamp,
419
+ jnp.where(
420
+ penultimate_was_timestamp > 0,
421
+ scores_k.at[self.timestamp_begin :].set(-float("inf")),
422
+ scores_k.at[: self.eos_token_id].set(-float("inf")),
423
+ ),
424
+ scores_k,
425
+ )
426
+
427
+ scores = jax.vmap(handle_pairs)(input_ids, scores)
428
+
429
+ apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False)
430
+ apply_max_initial_timestamp = jnp.where(
431
+ self.max_initial_timestamp_index is not None,
432
+ True and apply_max_initial_timestamp,
433
+ False,
434
+ )
435
+
436
+ last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
437
+
438
+ scores = jnp.where(
439
+ apply_max_initial_timestamp,
440
+ scores.at[:, last_allowed + 1 :].set(-float("inf")),
441
+ scores,
442
+ )
443
+
444
+ # if sum of probability over timestamps is above any other token, sample timestamp
445
+ logprobs = jax.nn.log_softmax(scores, axis=-1)
446
+
447
+ def handle_cumulative_probs(logprobs_k, scores_k):
448
+ timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1)
449
+ max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin])
450
+ return jnp.where(
451
+ timestamp_logprob > max_text_token_logprob,
452
+ scores_k.at[: self.timestamp_begin].set(-float("inf")),
453
+ scores_k,
454
+ )
455
+
456
+ scores = jax.vmap(handle_cumulative_probs)(logprobs, scores)
457
+
458
+ return scores
459
+
460
+
461
+ class FlaxNoRepeatNGramLogitsProcessor(FlaxLogitsProcessor):
462
+ r"""
463
+ [`FlaxLogitsProcessor`] that enforces no repetition of n-grams. See
464
+ [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
465
+
466
+ Args:
467
+ ngram_size (`int`):
468
+ All ngrams of size `ngram_size` can only occur once.
469
+ """
470
+
471
+ def __init__(self, ngram_size: int):
472
+ if not isinstance(ngram_size, int) or ngram_size <= 0:
473
+ raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
474
+ self.ngram_size = ngram_size
475
+
476
+ def get_previous_ngrams(self, input_ids: jnp.ndarray, vocab_size: int, cur_len: int):
477
+ """
478
+ get a matrix of size (batch_size,) + (vocab_size,)*n (for n-grams) that
479
 + represents the n-grams that occurred previously.
480
 + The BCOO representation allows storing only the few non-zero entries, instead of the full (huge) matrix.
481
+ """
482
+ batch_size, seq_len = input_ids.shape
483
+ # number of n-grams in the whole sequence
484
+ seq_ngrams = seq_len - (self.ngram_size - 1)
485
+ # number of n-grams in the currently generated sequence
486
+ cur_ngrams = cur_len - (self.ngram_size - 1)
487
+
488
+ def body_fun(i, val):
489
+ b = i % batch_size
490
+ pos = i // batch_size
491
+ return val.at[i].set(
492
+ jnp.array(
493
+ [
494
+ b,
495
+ ]
496
+ + [jnp.array(input_ids)[b, pos + j] for j in range(self.ngram_size)]
497
+ )
498
+ )
499
+
500
+ shape = (batch_size * seq_ngrams, self.ngram_size + 1)
501
+ all_update_indices = jax.lax.fori_loop(
502
+ 0, batch_size * cur_ngrams, body_fun, jnp.zeros(shape, dtype=input_ids.dtype)
503
+ )
504
+
505
+ # ignore the n-grams not yet generated
506
+ data = (jnp.arange(batch_size * seq_ngrams) < batch_size * cur_ngrams).astype("float32")
507
+
508
+ return sparse.BCOO((data, all_update_indices), shape=(batch_size,) + (vocab_size,) * self.ngram_size)
509
+
510
+ def get_banned_tokens_mask(self, latest_tokens: jnp.ndarray, previous_ngrams) -> jnp.ndarray:
511
+ """
512
+ Determines which tokens must be banned given latest tokens and the previously seen
513
+ ngrams.
514
+ """
515
+
516
+ @sparse.sparsify
517
+ @jax.vmap
518
+ def inner_fn(latest_tokens, previous_ngrams):
519
+ return previous_ngrams[tuple(latest_tokens)]
520
+
521
+ return sparse.bcoo_todense(inner_fn(latest_tokens, previous_ngrams))
522
+
523
+ def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray:
524
+ def true_fn():
525
+ _, vocab_size = scores.shape
526
+ # store the previously seen n-grams
527
+ previous_ngrams = self.get_previous_ngrams(input_ids, vocab_size, cur_len)
528
+
529
+ # get the n-1 last tokens that prefix the n-gram being generated
530
+ latest_tokens = jnp.zeros((input_ids.shape[0], self.ngram_size - 1), dtype=input_ids.dtype)
531
+ latest_tokens = jax.lax.dynamic_update_slice(
532
+ latest_tokens,
533
+ jax.lax.dynamic_slice(
534
+ input_ids, (0, cur_len - (self.ngram_size - 1)), (input_ids.shape[0], (self.ngram_size - 1))
535
+ ),
536
+ (0, 0),
537
+ )
538
+
539
 + # compute the banned tokens, i.e. all the tokens that, when added to the latest tokens, lead to an n-gram that was previously generated
540
+ banned_tokens_indices_mask = self.get_banned_tokens_mask(latest_tokens, previous_ngrams).astype("bool")
541
+ return jnp.where(banned_tokens_indices_mask, -float("inf"), scores)
542
+
543
+ output = jax.lax.cond((cur_len >= self.ngram_size - 1), true_fn, lambda: scores)
544
+ return output
llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_utils.py ADDED
@@ -0,0 +1,1022 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team.
3
+ # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+
18
+ import copy
19
+ import inspect
20
+ import warnings
21
+ from functools import partial
22
+ from typing import Any, Dict, Optional, Union
23
+
24
+ import flax
25
+ import jax
26
+ import jax.numpy as jnp
27
+ import numpy as np
28
+ from jax import lax
29
+
30
+ from ..models.auto import (
31
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
32
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
33
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
34
+ )
35
+ from ..utils import ModelOutput, logging
36
+ from .configuration_utils import GenerationConfig
37
+ from .flax_logits_process import (
38
+ FlaxForcedBOSTokenLogitsProcessor,
39
+ FlaxForcedEOSTokenLogitsProcessor,
40
+ FlaxForceTokensLogitsProcessor,
41
+ FlaxLogitsProcessorList,
42
+ FlaxMinLengthLogitsProcessor,
43
+ FlaxNoRepeatNGramLogitsProcessor,
44
+ FlaxSuppressTokensAtBeginLogitsProcessor,
45
+ FlaxSuppressTokensLogitsProcessor,
46
+ FlaxTemperatureLogitsWarper,
47
+ FlaxTopKLogitsWarper,
48
+ FlaxTopPLogitsWarper,
49
+ )
50
+
51
+
52
+ logger = logging.get_logger(__name__)
53
+
54
+
55
+ @flax.struct.dataclass
56
+ class FlaxGreedySearchOutput(ModelOutput):
57
+ """
58
+ Flax Base class for outputs of decoder-only generation models using greedy search.
59
+
60
+
61
+ Args:
62
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
63
+ The generated sequences.
64
+ """
65
+
66
+ sequences: jnp.ndarray = None
67
+
68
+
69
+ @flax.struct.dataclass
70
+ class FlaxSampleOutput(ModelOutput):
71
+ """
72
+ Flax Base class for outputs of decoder-only generation models using sampling.
73
+
74
+
75
+ Args:
76
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
77
+ The generated sequences.
78
+ """
79
+
80
+ sequences: jnp.ndarray = None
81
+
82
+
83
+ @flax.struct.dataclass
84
+ class FlaxBeamSearchOutput(ModelOutput):
85
+ """
86
 + Flax Base class for outputs of decoder-only generation models using beam search.
87
+
88
+
89
+ Args:
90
+ sequences (`jnp.ndarray` of shape `(batch_size, max_length)`):
91
+ The generated sequences.
92
+ scores (`jnp.ndarray` of shape `(batch_size,)`):
93
+ The scores (log probabilities) of the generated sequences.
94
+ """
95
+
96
+ sequences: jnp.ndarray = None
97
+ scores: jnp.ndarray = None
98
+
99
+
100
+ @flax.struct.dataclass
101
+ class GreedyState:
102
+ cur_len: jnp.ndarray
103
+ sequences: jnp.ndarray
104
+ running_token: jnp.ndarray
105
+ is_sent_finished: jnp.ndarray
106
+ model_kwargs: Dict[str, jnp.ndarray]
107
+
108
+
109
+ @flax.struct.dataclass
110
+ class SampleState:
111
+ cur_len: jnp.ndarray
112
+ sequences: jnp.ndarray
113
+ running_token: jnp.ndarray
114
+ is_sent_finished: jnp.ndarray
115
+ prng_key: jnp.ndarray
116
+ model_kwargs: Dict[str, jnp.ndarray]
117
+
118
+
119
+ @flax.struct.dataclass
120
+ class BeamSearchState:
121
+ cur_len: jnp.ndarray
122
+ running_sequences: jnp.ndarray
123
+ running_scores: jnp.ndarray
124
+ sequences: jnp.ndarray
125
+ scores: jnp.ndarray
126
+ is_sent_finished: jnp.ndarray
127
+ model_kwargs: Dict[str, jnp.ndarray]
128
+
129
+
130
+ class FlaxGenerationMixin:
131
+ """
132
+ A class containing all functions for auto-regressive text generation, to be used as a mixin in
133
+ [`FlaxPreTrainedModel`].
134
+
135
+ The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for:
136
+ - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and
137
+ `do_sample=False`
138
+ - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and
139
+ `do_sample=True`
140
+ - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and
141
+ `do_sample=False`
142
+
143
+ You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To
144
+ learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies).
145
+ """
146
+
147
+ def prepare_inputs_for_generation(self, *args, **kwargs):
148
+ raise NotImplementedError(
149
+ "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`."
150
+ )
151
+
152
+ @staticmethod
153
+ def _run_loop_in_debug(cond_fn, body_fn, init_state):
154
+ """
155
+ Run generation in untraced mode. This should only be used for debugging purposes.
156
+ """
157
+ state = init_state
158
+ while cond_fn(state):
159
+ state = body_fn(state)
160
+ return state
161
+
162
+ def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs):
163
+ encoder_kwargs = {
164
+ argument: value
165
+ for argument, value in model_kwargs.items()
166
+ if not (argument.startswith("decoder_") or argument.startswith("cross_attn"))
167
+ }
168
+ model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs)
169
+ return model_kwargs
170
+
171
+ def _prepare_decoder_input_ids_for_generation(
172
+ self,
173
+ batch_size: int,
174
+ decoder_start_token_id: int = None,
175
+ bos_token_id: int = None,
176
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
177
+ ) -> jnp.ndarray:
178
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs:
179
+ # Only use this arg if not None, otherwise just remove from model_kwargs
180
+ decoder_input_ids = model_kwargs.pop("decoder_input_ids")
181
+ if decoder_input_ids is not None:
182
+ return decoder_input_ids
183
+ decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id)
184
+ return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0)
185
+
186
+ def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int:
187
+ # retrieve decoder_start_token_id for encoder-decoder models
188
+ # fall back to bos_token_id if necessary
189
+ decoder_start_token_id = (
190
+ decoder_start_token_id
191
+ if decoder_start_token_id is not None
192
+ else self.generation_config.decoder_start_token_id
193
+ )
194
+ bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id
195
+ if decoder_start_token_id is not None:
196
+ return decoder_start_token_id
197
+ elif (
198
+ hasattr(self.config, "decoder")
199
+ and hasattr(self.config.decoder, "decoder_start_token_id")
200
+ and self.config.decoder.decoder_start_token_id is not None
201
+ ):
202
+ return self.config.decoder.decoder_start_token_id
203
+ elif bos_token_id is not None:
204
+ return bos_token_id
205
+ elif (
206
+ hasattr(self.config, "decoder")
207
+ and hasattr(self.config.decoder, "bos_token_id")
208
+ and self.config.decoder.bos_token_id is not None
209
+ ):
210
+ return self.config.decoder.bos_token_id
211
+ raise ValueError(
212
+ "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation."
213
+ )
214
+
215
+ @staticmethod
216
+ def _expand_to_num_beams(tensor, num_beams):
217
+ return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
218
+
219
+ def _adapt_logits_for_beam_search(self, logits):
220
+ """
221
+ This function can be overwritten in the specific modeling_flax_<model-name>.py classes to allow for custom beam
222
 + search behavior. Note that the only model that overwrites this method is [`~transformers.FlaxMarianMTModel`].
223
+ """
224
+ return logits
225
+
226
+ def _validate_model_class(self):
227
+ """
228
+ Confirms that the model class is compatible with generation. If not, raises an exception that points to the
229
+ right class to use.
230
+ """
231
+ if not self.can_generate():
232
+ generate_compatible_mappings = [
233
+ FLAX_MODEL_FOR_CAUSAL_LM_MAPPING,
234
+ FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING,
235
+ FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
236
+ ]
237
+ generate_compatible_classes = set()
238
+ for model_mapping in generate_compatible_mappings:
239
+ supported_models = model_mapping.get(type(self.config), default=None)
240
+ if supported_models is not None:
241
+ generate_compatible_classes.add(supported_models.__name__)
242
+ exception_message = (
243
+ f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as "
244
+ "it doesn't have a language model head."
245
+ )
246
+ if generate_compatible_classes:
247
+ exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}"
248
+ raise TypeError(exception_message)
249
+
250
+ def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):
251
+ """Validates model kwargs for generation. Generate argument typos will also be caught here."""
252
+ unused_model_args = []
253
+ model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)
254
+ # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If
255
+ # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)
256
+ if "kwargs" in model_args or "model_kwargs" in model_args:
257
+ model_args |= set(inspect.signature(self.__call__).parameters)
258
+ for key, value in model_kwargs.items():
259
+ if value is not None and key not in model_args:
260
+ unused_model_args.append(key)
261
+
262
+ if unused_model_args:
263
+ raise ValueError(
264
+ f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the"
265
+ " generate arguments will also show up in this list)"
266
+ )
267
+
268
+ def generate(
269
+ self,
270
+ input_ids: jnp.ndarray,
271
+ generation_config: Optional[GenerationConfig] = None,
272
+ prng_key: Optional[jnp.ndarray] = None,
273
+ trace: bool = True,
274
+ params: Optional[Dict[str, jnp.ndarray]] = None,
275
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
276
+ **kwargs,
277
+ ):
278
+ r"""
279
+ Generates sequences of token ids for models with a language modeling head.
280
+
281
+ Parameters:
282
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
283
+ The sequence used as a prompt for the generation.
284
+ generation_config (`~generation.GenerationConfig`, *optional*):
285
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
286
+ passed to generate matching the attributes of `generation_config` will override them. If
287
 + `generation_config` is not provided, the default will be used, which has the following loading
288
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
289
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
290
+ default values, whose documentation should be checked to parameterize generation.
291
+ trace (`bool`, *optional*, defaults to `True`):
292
+ Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a
293
+ considerably slower runtime.
294
+ params (`Dict[str, jnp.ndarray]`, *optional*):
295
+ Optionally the model parameters can be passed. Can be useful for parallelized generation.
296
+ logits_processor (`FlaxLogitsProcessorList `, *optional*):
297
+ Custom logits processors that complement the default logits processors built from arguments and
298
+ generation config. If a logit processor is passed that is already created with the arguments or a
299
+ generation config an error is thrown. This feature is intended for advanced users.
300
+ kwargs (`Dict[str, Any]`, *optional*):
301
+ Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
302
+ forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder
303
+ specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.
304
+
305
+ Return:
306
+ [`~utils.ModelOutput`].
307
+
308
+ """
309
+ # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call
310
+ self._validate_model_class()
311
+
312
+ # priority: `generation_config` argument > `model.generation_config` (the default generation config)
313
+ if generation_config is None:
314
+ # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior,
315
+ # two conditions must be met
316
+ # 1) the generation config must have been created from the model config (`_from_model_config` field);
317
+ # 2) the generation config must have seen no modification since its creation (the hash is the same).
318
+ if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash(
319
+ self.generation_config
320
+ ):
321
+ new_generation_config = GenerationConfig.from_model_config(self.config)
322
+ if new_generation_config != self.generation_config:
323
+ warnings.warn(
324
+ "You have modified the pretrained model configuration to control generation. This is a"
325
+ " deprecated strategy to control generation and will be removed soon, in a future version."
326
+ " Please use and modify the model generation configuration (see"
327
+ " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )"
328
+ )
329
+ self.generation_config = new_generation_config
330
+ generation_config = self.generation_config
331
+
332
+ generation_config = copy.deepcopy(generation_config)
333
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
334
+ self._validate_model_kwargs(model_kwargs.copy())
335
+
336
+ logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList()
337
+
338
+ # set init values
339
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
340
+
341
+ if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:
342
+ if model_kwargs.get("attention_mask") is None:
343
+ logger.warning(
344
+ "The attention mask and the pad token id were not set. As a consequence, you may observe "
345
+ "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results."
346
+ )
347
+ eos_token_id = generation_config.eos_token_id
348
+ if isinstance(eos_token_id, list):
349
+ eos_token_id = eos_token_id[0]
350
+ logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.")
351
+ generation_config.pad_token_id = eos_token_id
352
+
353
+ if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder:
354
+ raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.")
355
+
356
+ # decoder-only models should use left-padding for generation (can't be checked with `trace=True`)
357
+ if not self.config.is_encoder_decoder and not trace:
358
+ if (
359
+ generation_config.pad_token_id is not None
360
+ and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0
361
+ ):
362
+ logger.warning(
363
+ "A decoder-only architecture is being used, but right-padding was detected! For correct "
364
+ "generation results, please set `padding_side='left'` when initializing the tokenizer."
365
+ )
366
+
367
+ batch_size = input_ids.shape[0]
368
+
369
+ if self.config.is_encoder_decoder:
370
+ # add encoder_outputs to model_kwargs
371
+ if model_kwargs.get("encoder_outputs") is None:
372
+ model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs)
373
+ # prepare decoder_input_ids for generation
374
+ input_ids = self._prepare_decoder_input_ids_for_generation(
375
+ batch_size,
376
+ decoder_start_token_id=generation_config.decoder_start_token_id,
377
+ bos_token_id=generation_config.bos_token_id,
378
+ model_kwargs=model_kwargs,
379
+ )
380
+
381
+ # Prepare `max_length` depending on other stopping criteria.
382
+ input_ids_seq_length = input_ids.shape[-1]
383
+ has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None
384
+ if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20:
385
+ # 20 is the default max_length of the generation config
386
+ warnings.warn(
387
+ f"Using the model-agnostic default `max_length` (={generation_config.max_length}) "
388
+ "to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation.",
389
+ UserWarning,
390
+ )
391
+ elif generation_config.max_new_tokens is not None:
392
+ if not has_default_max_length and generation_config.max_length is not None:
393
+ logger.warning(
394
+ f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(="
395
+ f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. "
396
+ "Please refer to the documentation for more information. "
397
+ "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)"
398
+ )
399
+ generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length
400
+
401
+ if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length:
402
+ raise ValueError(
403
+ f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than"
404
+ f" the maximum length ({generation_config.max_length})"
405
+ )
406
+ if input_ids_seq_length >= generation_config.max_length:
407
+ input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids"
408
+ logger.warning(
409
+ f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to"
410
+ f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider"
411
+ " increasing`max_new_tokens`."
412
+ )
413
+
414
+ logits_processor = self._get_logits_processor(
415
+ generation_config=generation_config,
416
+ input_ids_seq_length=input_ids_seq_length,
417
+ logits_processor=logits_processor,
418
+ )
419
+
420
+ if not generation_config.do_sample and generation_config.num_beams == 1:
421
+ return self._greedy_search(
422
+ input_ids,
423
+ generation_config.max_length,
424
+ generation_config.pad_token_id,
425
+ generation_config.eos_token_id,
426
+ logits_processor=logits_processor,
427
+ trace=trace,
428
+ params=params,
429
+ model_kwargs=model_kwargs,
430
+ )
431
+ elif generation_config.do_sample and generation_config.num_beams == 1:
432
+ logits_warper = self._get_logits_warper(generation_config=generation_config)
433
+ return self._sample(
434
+ input_ids,
435
+ generation_config.max_length,
436
+ generation_config.pad_token_id,
437
+ generation_config.eos_token_id,
438
+ prng_key,
439
+ logits_warper=logits_warper,
440
+ logits_processor=logits_processor,
441
+ trace=trace,
442
+ params=params,
443
+ model_kwargs=model_kwargs,
444
+ )
445
+ elif not generation_config.do_sample and generation_config.num_beams > 1:
446
+ # broadcast input_ids & encoder_outputs
447
+ input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams)
448
+
449
+ if "encoder_outputs" in model_kwargs:
450
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams(
451
+ model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams
452
+ )
453
+
454
+ for kwarg in ["attention_mask", "decoder_attention_mask"]:
455
+ if kwarg in model_kwargs:
456
+ model_kwargs[kwarg] = self._expand_to_num_beams(
457
+ model_kwargs[kwarg], num_beams=generation_config.num_beams
458
+ )
459
+
460
+ return self._beam_search(
461
+ input_ids,
462
+ generation_config.max_length,
463
+ generation_config.pad_token_id,
464
+ generation_config.eos_token_id,
465
+ length_penalty=generation_config.length_penalty,
466
+ early_stopping=generation_config.early_stopping,
467
+ logits_processor=logits_processor,
468
+ trace=trace,
469
+ params=params,
470
+ num_return_sequences=generation_config.num_return_sequences,
471
+ model_kwargs=model_kwargs,
472
+ )
473
+ else:
474
+ raise NotImplementedError("`Beam sampling is currently not implemented.")
475
+
476
+ def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList:
477
+ """
478
 + This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`]
479
+ instances used for multinomial sampling.
480
+ """
481
+ warpers = FlaxLogitsProcessorList()
482
+
483
+ if generation_config.temperature is not None and generation_config.temperature != 1.0:
484
+ warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature))
485
+ if generation_config.top_k is not None and generation_config.top_k != 0:
486
+ warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1))
487
+ if generation_config.top_p is not None and generation_config.top_p < 1.0:
488
+ warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1))
489
+
490
+ return warpers
491
+
492
+ def _get_logits_processor(
493
+ self,
494
+ generation_config: GenerationConfig,
495
+ input_ids_seq_length: int,
496
+ logits_processor: Optional[FlaxLogitsProcessorList],
497
+ ) -> FlaxLogitsProcessorList:
498
+ """
499
 + This method returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`]
500
+ instances used to modify the scores of the language model head.
501
+ """
502
+ processors = FlaxLogitsProcessorList()
503
+
504
+ if (
505
+ generation_config.min_length is not None
506
+ and generation_config.eos_token_id is not None
507
+ and generation_config.min_length > -1
508
+ ):
509
+ processors.append(
510
+ FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)
511
+ )
512
+ if generation_config.forced_bos_token_id is not None:
513
+ processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id))
514
+ if generation_config.forced_eos_token_id is not None:
515
+ processors.append(
516
+ FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id)
517
+ )
518
+ if generation_config.suppress_tokens is not None:
519
+ processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens))
520
+ if generation_config.begin_suppress_tokens is not None:
521
+ begin_index = input_ids_seq_length
522
+ begin_index = (
523
+ begin_index
524
+ if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None)
525
+ else begin_index + 1
526
+ )
527
+ if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0:
528
+ # generation starts after the last token that is forced
529
+ begin_index += generation_config.forced_decoder_ids[-1][0]
530
+ processors.append(
531
+ FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index)
532
+ )
533
+ if generation_config.forced_decoder_ids is not None:
534
+ forced_decoder_ids = [
535
+ [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids
536
+ ]
537
+ processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids))
538
+ if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0:
539
+ processors.append(FlaxNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size))
540
+ processors = self._merge_criteria_processor_list(processors, logits_processor)
541
+
542
+ return processors
543
+
544
+ def _merge_criteria_processor_list(
545
+ self,
546
+ default_list: FlaxLogitsProcessorList,
547
+ custom_list: FlaxLogitsProcessorList,
548
+ ) -> FlaxLogitsProcessorList:
549
+ if len(custom_list) == 0:
550
+ return default_list
551
+ for default in default_list:
552
+ for custom in custom_list:
553
+ if type(custom) is type(default):
554
+ object_type = "logits processor"
555
+ raise ValueError(
556
+ f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to"
557
+ f" `generate`, but it has already been created with the values {default}. {default} has been"
558
+ " created by passing the corresponding arguments to generate or by the model's config default"
559
+ f" values. If you just want to change the default values of {object_type} consider passing"
560
+ f" them as arguments to `generate` instead of using a custom {object_type}."
561
+ )
562
+ default_list.extend(custom_list)
563
+ return default_list
564
+
565
+ def _greedy_search(
566
+ self,
567
+ input_ids: None,
568
+ max_length: Optional[int] = None,
569
+ pad_token_id: Optional[int] = None,
570
+ eos_token_id: Optional[int] = None,
571
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
572
+ trace: bool = True,
573
+ params: Optional[Dict[str, jnp.ndarray]] = None,
574
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
575
+ ):
576
+ # init values
577
+ max_length = max_length if max_length is not None else self.generation_config.max_length
578
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
579
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
580
+
581
+ batch_size, cur_len = input_ids.shape
582
+
583
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
584
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
585
+ cur_len = jnp.array(cur_len)
586
+
587
+ # per batch-item holding current token in loop.
588
+ sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
589
+ sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
590
+
591
+ # per batch-item state bit indicating if sentence has finished.
592
+ is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
593
+
594
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
595
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
596
+ model = self.decode if self.config.is_encoder_decoder else self
597
+ # initialize model specific kwargs
598
+ model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
599
+
600
+ # initialize state
601
+ state = GreedyState(
602
+ cur_len=cur_len,
603
+ sequences=sequences,
604
+ running_token=input_ids,
605
+ is_sent_finished=is_sent_finished,
606
+ model_kwargs=model_kwargs,
607
+ )
608
+
609
+ def greedy_search_cond_fn(state):
610
+ """state termination condition fn."""
611
+ has_reached_max_length = state.cur_len == max_length
612
+ all_sequence_finished = jnp.all(state.is_sent_finished)
613
+ finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
614
+ return ~finish_generation
615
+
616
+ def greedy_search_body_fn(state):
617
+ """state update fn."""
618
+ model_outputs = model(state.running_token, params=params, **state.model_kwargs)
619
+ logits = model_outputs.logits[:, -1]
620
+
621
+ # apply min_length, ...
622
+ logits = logits_processor(state.sequences, logits, state.cur_len)
623
+
624
+ next_token = jnp.argmax(logits, axis=-1)
625
+
626
+ next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
627
+ next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
628
+ next_token = next_token[:, None]
629
+
630
+ next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
631
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
632
+ return GreedyState(
633
+ cur_len=state.cur_len + 1,
634
+ sequences=next_sequences,
635
+ running_token=next_token,
636
+ is_sent_finished=next_is_sent_finished,
637
+ model_kwargs=next_model_kwargs,
638
+ )
639
+
640
+ # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
641
+ if input_ids.shape[1] > 1:
642
+ state = greedy_search_body_fn(state)
643
+
644
+ if not trace:
645
+ state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state)
646
+ else:
647
+ state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state)
648
+
649
+ return FlaxGreedySearchOutput(sequences=state.sequences)
650
+
651
+ def _sample(
652
+ self,
653
+ input_ids: None,
654
+ max_length: Optional[int] = None,
655
+ pad_token_id: Optional[int] = None,
656
+ eos_token_id: Optional[int] = None,
657
+ prng_key: Optional[jnp.ndarray] = None,
658
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
659
+ logits_warper: Optional[FlaxLogitsProcessorList] = None,
660
+ trace: bool = True,
661
+ params: Optional[Dict[str, jnp.ndarray]] = None,
662
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
663
+ ):
664
+ # init values
665
+ max_length = max_length if max_length is not None else self.generation_config.max_length
666
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
667
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
668
+ prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)
669
+
670
+ batch_size, cur_len = input_ids.shape
671
+
672
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
673
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
674
+ cur_len = jnp.array(cur_len)
675
+
676
+ # per batch-item holding current token in loop.
677
+ sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
678
+ sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))
679
+
680
+ # per batch-item state bit indicating if sentence has finished.
681
+ is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)
682
+
683
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
684
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
685
+ model = self.decode if self.config.is_encoder_decoder else self
686
+
687
+ # initialize model specific kwargs
688
+ model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs)
689
+
690
+ # initialize state
691
+ state = SampleState(
692
+ cur_len=cur_len,
693
+ sequences=sequences,
694
+ running_token=input_ids,
695
+ is_sent_finished=is_sent_finished,
696
+ prng_key=prng_key,
697
+ model_kwargs=model_kwargs,
698
+ )
699
+
700
+ def sample_search_cond_fn(state):
701
+ """state termination condition fn."""
702
+ has_reached_max_length = state.cur_len == max_length
703
+ all_sequence_finished = jnp.all(state.is_sent_finished)
704
+ finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished)
705
+ return ~finish_generation
706
+
707
+ def sample_search_body_fn(state):
708
+ """state update fn."""
709
+ prng_key, prng_key_next = jax.random.split(state.prng_key)
710
+ model_outputs = model(state.running_token, params=params, **state.model_kwargs)
711
+
712
+ logits = model_outputs.logits[:, -1]
713
+
714
+ # apply min_length, ...
715
+ logits = logits_processor(state.sequences, logits, state.cur_len)
716
+ # apply top_p, top_k, temperature
717
+ logits = logits_warper(logits, logits, state.cur_len)
718
+
719
+ next_token = jax.random.categorical(prng_key, logits, axis=-1)
720
+
721
+ next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished
722
+ next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id)
723
+ next_token = next_token[:, None]
724
+
725
+ next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len))
726
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
727
+
728
+ return SampleState(
729
+ cur_len=state.cur_len + 1,
730
+ sequences=next_sequences,
731
+ running_token=next_token,
732
+ is_sent_finished=next_is_sent_finished,
733
+ model_kwargs=next_model_kwargs,
734
+ prng_key=prng_key_next,
735
+ )
736
+
737
+ # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
738
+ if input_ids.shape[1] > 1:
739
+ state = sample_search_body_fn(state)
740
+
741
+ if not trace:
742
+ state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state)
743
+ else:
744
+ state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)
745
+
746
+ return FlaxSampleOutput(sequences=state.sequences)
747
+
748
+ def _beam_search(
749
+ self,
750
+ input_ids: None,
751
+ max_length: Optional[int] = None,
752
+ pad_token_id: Optional[int] = None,
753
+ eos_token_id: Optional[int] = None,
754
+ length_penalty: Optional[float] = None,
755
+ early_stopping: Optional[Union[bool, str]] = None,
756
+ logits_processor: Optional[FlaxLogitsProcessorList] = None,
757
+ trace: bool = True,
758
+ params: Optional[Dict[str, jnp.ndarray]] = None,
759
+ num_return_sequences: Optional[int] = None,
760
+ model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
761
+ ):
762
+ """
763
+ This beam search function is heavily inspired by Flax's official example:
764
+ https://github.com/google/flax/blob/main/examples/wmt/decode.py
765
+ """
766
+
767
+ def flatten_beam_dim(tensor):
768
+ """Flattens the first two dimensions of a non-scalar array."""
769
+ # ignore scalars (e.g. cache index)
770
+ if tensor.ndim == 0:
771
+ return tensor
772
+ return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:])
773
+
774
+ def unflatten_beam_dim(tensor, batch_size, num_beams):
775
+ """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
776
+ # ignore scalars (e.g. cache index)
777
+ if tensor.ndim == 0:
778
+ return tensor
779
+ return tensor.reshape((batch_size, num_beams) + tensor.shape[1:])
780
+
781
+ def gather_beams(nested, beam_indices, batch_size, new_num_beams):
782
+ """
783
 + Gathers the beam slices indexed by beam_indices into a new beam array.
784
+ """
785
+ batch_indices = jnp.reshape(
786
+ jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams)
787
+ )
788
+
789
+ def gather_fn(tensor):
790
+ # ignore scalars (e.g. cache index)
791
+ if tensor.ndim == 0:
792
+ return tensor
793
+ else:
794
+ return tensor[batch_indices, beam_indices]
795
+
796
+ return jax.tree_util.tree_map(gather_fn, nested)
797
+
798
+ # init values
799
+ max_length = max_length if max_length is not None else self.generation_config.max_length
800
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
801
+ eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id
802
+ length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty
803
+ early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping
804
+ num_return_sequences = (
805
+ num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences
806
+ )
807
+
808
+ batch_size, num_beams, cur_len = input_ids.shape
809
+
810
+ eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None)
811
+ pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32)
812
+ cur_len = jnp.array(cur_len)
813
+
814
+ # record the prompt length of decoder
815
+ decoder_prompt_len = input_ids.shape[-1]
816
+
817
+ # per batch,beam-item holding current token in loop.
818
+ sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
819
+ running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32)
820
+ running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0))
821
+
822
+ # per batch,beam-item state bit indicating if sentence has finished.
823
+ is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_)
824
+
825
+ # per batch,beam-item score, logprobs
826
+ running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1])
827
+ scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7)
828
+
829
+ # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
830
+ # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
831
+ model = self.decode if self.config.is_encoder_decoder else self
832
+
833
+ # flatten beam dim
834
+ if "encoder_outputs" in model_kwargs:
835
+ model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim(
836
+ model_kwargs["encoder_outputs"]["last_hidden_state"]
837
+ )
838
+ for kwarg in ["attention_mask", "decoder_attention_mask"]:
839
+ if kwarg in model_kwargs:
840
+ model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg])
841
+
842
+ # initialize model specific kwargs
843
+ model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs)
844
+
845
+ # initialize state
846
+ state = BeamSearchState(
847
+ cur_len=cur_len,
848
+ running_sequences=running_sequences,
849
+ running_scores=running_scores,
850
+ sequences=sequences,
851
+ scores=scores,
852
+ is_sent_finished=is_sent_finished,
853
+ model_kwargs=model_kwargs,
854
+ )
855
+
856
+ def beam_search_cond_fn(state):
857
+ """beam search state termination condition fn."""
858
+
859
+ # 1. is less than max length?
860
+ not_max_length_yet = state.cur_len < max_length
861
+
862
+ # 2. can the new beams still improve?
863
+ # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion
864
+ # below for more details.
865
+ # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565
866
+ # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of
867
+ # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there.
868
+ if early_stopping == "never" and length_penalty > 0.0:
869
+ best_running_score = state.running_scores[:, :1] / (
870
+ (max_length - decoder_prompt_len) ** length_penalty
871
+ )
872
+ else:
873
+ best_running_score = state.running_scores[:, :1] / (
874
+ (state.cur_len - decoder_prompt_len) ** length_penalty
875
+ )
876
+ worst_finished_score = jnp.where(
877
+ state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7)
878
+ )
879
+ improvement_still_possible = jnp.any(best_running_score > worst_finished_score)
880
+
881
+ # 3. is there still a beam that has not finished?
882
+ still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True))
883
+
884
+ return not_max_length_yet & still_open_beam & improvement_still_possible
885
+
886
+ def beam_search_body_fn(state, input_ids_length=1):
887
+ """beam search state update fn."""
888
+ # 1. Forward current tokens
889
+ # Collect the current position slice along length to feed the fast
890
+ # autoregressive decoder model. Flatten the beam dimension into batch
891
+ # dimension for feeding into the model.
892
+ # unflatten beam dimension
893
+ # Unflatten beam dimension in attention cache arrays
894
+ input_token = flatten_beam_dim(
895
+ lax.dynamic_slice(
896
+ state.running_sequences,
897
+ (0, 0, state.cur_len - input_ids_length),
898
+ (batch_size, num_beams, input_ids_length),
899
+ )
900
+ )
901
+ model_outputs = model(input_token, params=params, **state.model_kwargs)
902
+
903
+ logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams)
904
+ cache = jax.tree_util.tree_map(
905
+ lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values
906
+ )
907
+
908
+ # adapt logits for FlaxMarianMTModel
909
+ logits = self._adapt_logits_for_beam_search(logits)
910
+
911
+ # 2. Compute log probs
912
+ # get log probabilities from logits,
913
+ # process logits with processors (*e.g.* min_length, ...), and
914
+ # add new logprobs to existing running logprobs scores.
915
+ log_probs = jax.nn.log_softmax(logits)
916
+ log_probs = logits_processor(
917
+ flatten_beam_dim(state.running_sequences), flatten_beam_dim(log_probs), state.cur_len
918
+ )
919
+ log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams)
920
+ log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2)
921
+ vocab_size = log_probs.shape[2]
922
+ log_probs = log_probs.reshape((batch_size, num_beams * vocab_size))
923
+
924
+ # 3. Retrieve top-K
925
+ # Each item in batch has num_beams * vocab_size candidate sequences.
926
+ # For each item, get the top 2*k candidates with the highest log-
927
+ # probabilities. We gather the top 2*K beams here so that even if the best
928
+ # K sequences reach EOS simultaneously, we have another K sequences
929
+ # remaining to continue the live beam search.
930
+ # Gather the top 2*K scores from _all_ beams.
931
+ # Gather 2*k top beams.
932
+ # Recover the beam index by floor division.
933
+ # Recover token id by modulo division and expand Id array for broadcasting.
934
+ # Update sequences for the 2*K top-k new sequences.
935
+ beams_to_keep = 2 * num_beams
936
+ topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep)
937
+ topk_beam_indices = topk_indices // vocab_size
938
+ topk_running_sequences = gather_beams(
939
+ state.running_sequences, topk_beam_indices, batch_size, beams_to_keep
940
+ )
941
+ topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2)
942
+ topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len))
943
+
944
+ # 4. Check which sequences have ended
945
+ # Update current sequences:
946
+ # Did any of these sequences reach an end marker?
947
+ # To prevent these just-finished sequences from being added to the set of active
948
+ # beam search sequences, set their log probs to a very large
949
+ # negative value.
950
+ did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id
951
+ running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7)
952
+ # 5. Get running sequences scores for next
953
+ # Determine the top k beam indices (from top 2*k beams) from log probs
954
+ # and gather top k beams (from top 2*k beams).
955
+ next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1]
956
+ next_running_sequences, next_running_scores = gather_beams(
957
+ [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams
958
+ )
959
+
960
+ # 6. Process topk logits
961
+ # Further process log probs:
962
+ # - add length penalty
963
+ # - make sure no scores can be added anymore if beam is full
964
+ # - make sure still running sequences cannot be chosen as finalized beam
965
+ topk_log_probs = topk_log_probs / ((state.cur_len + 1 - decoder_prompt_len) ** length_penalty)
966
+ beams_in_batch_are_full = jnp.broadcast_to(
967
+ state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape
968
+ ) & (early_stopping is True)
969
+ add_penalty = ~did_topk_just_finished | beams_in_batch_are_full
970
+ topk_log_probs += add_penalty * np.array(-1.0e7)
971
+
972
+ # 7. Get scores, sequences, is sentence finished for next.
973
+ # Combine sequences, scores, and flags along the beam dimension and compare
974
+ # new finished sequence scores to existing finished scores and select the
975
+ # best from the new set of beams
976
+ merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1)
977
+ merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1)
978
+ merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1)
979
+ topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1]
980
+ next_sequences, next_scores, next_is_sent_finished = gather_beams(
981
+ [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams
982
+ )
983
+
984
+ # 8. Update model kwargs.
985
+ # Determine the top k beam indices from the original set of all beams.
986
+ # With these, gather the top k beam-associated caches.
987
+ next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams)
988
+ next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams)
989
+ model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache)
990
+ next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs)
991
+
992
+ return BeamSearchState(
993
+ cur_len=state.cur_len + 1,
994
+ running_scores=next_running_scores,
995
+ running_sequences=next_running_sequences,
996
+ scores=next_scores,
997
+ sequences=next_sequences,
998
+ is_sent_finished=next_is_sent_finished,
999
+ model_kwargs=next_model_kwargs,
1000
+ )
1001
+
1002
+ # Always run first iteration outside of `lax.while_loop` to avoid calling `beam_search_cond_fn`
1003
+ # when `state.cur_len` equals `decoder_prompt_len`. This also helps to comply with TPU when
1004
+ # the very first prompt has sequence length > 1.
1005
+ state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state)
1006
+
1007
+ if not trace:
1008
+ state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state)
1009
+ else:
1010
+ state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state)
1011
+
1012
+ # Account for the edge-case where there are no finished sequences for a
1013
+ # particular batch item. If so, return running sequences for that batch item.
1014
+ none_finished = jnp.any(state.is_sent_finished, axis=1)
1015
+ sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences)
1016
+ scores = jnp.where(none_finished[:, None], state.scores, state.running_scores)
1017
+
1018
+ # Take best beams for each batch (the score is sorted in descending order)
1019
+ sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :])
1020
+ scores = flatten_beam_dim(scores[:, :num_return_sequences])
1021
+
1022
+ return FlaxBeamSearchOutput(sequences=sequences, scores=scores)
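As an aside on the termination condition above: `beam_search_cond_fn` compares a length-penalty-normalized score of the best running beam against the worst finished beam. The standalone sketch below (hypothetical numbers, plain Python instead of the traced JAX ops) illustrates why, with a positive `length_penalty` and `early_stopping == "never"`, the optimistic bound divides by `max_length - decoder_prompt_len`.

```python
# Hypothetical values, for illustration only (not taken from the diff above).
running_log_prob = -6.0            # cumulative log-prob of the best running beam
cur_len, max_length, decoder_prompt_len = 8, 20, 1
length_penalty = 1.0               # > 0 rewards longer sequences

# Score if the beam were finalized now vs. the best score it could still reach at max_length.
score_now = running_log_prob / ((cur_len - decoder_prompt_len) ** length_penalty)
score_best_case = running_log_prob / ((max_length - decoder_prompt_len) ** length_penalty)

print(round(score_now, 3), round(score_best_case, 3))  # -0.857 -0.316: growing longer can only raise the score
```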
llmeval-env/lib/python3.10/site-packages/transformers/generation/logits_process.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py ADDED
@@ -0,0 +1,189 @@
1
+ import time
2
+ import warnings
3
+ from abc import ABC
4
+ from copy import deepcopy
5
+ from typing import List, Optional, Union
6
+
7
+ import torch
8
+
9
+ from ..utils import add_start_docstrings, logging
10
+
11
+
12
+ logger = logging.get_logger(__name__)
13
+
14
+
15
+ STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
16
+ Args:
17
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
18
+ Indices of input sequence tokens in the vocabulary.
19
+
20
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
21
+ [`PreTrainedTokenizer.__call__`] for details.
22
+
23
+ [What are input IDs?](../glossary#input-ids)
24
+ scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
25
+ Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
26
+ or scores for each vocabulary token after SoftMax. If this stopping criteria depends on the `scores` input,
27
+ make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`.
28
+ kwargs (`Dict[str, Any]`, *optional*):
29
+ Additional stopping criteria specific kwargs.
30
+
31
+ Return:
32
+ `torch.BoolTensor` of shape `(batch_size, 1)`, where `True` indicates we stop generation
33
+ for a particular row, and `False` indicates we should continue.
34
+
35
+ """
36
+
37
+
38
+ class StoppingCriteria(ABC):
39
+ """Abstract base class for all stopping criteria that can be applied during generation.
40
+
41
+ If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True,
42
+ output_scores=True` to `generate`.
43
+ """
44
+
45
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
46
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
47
+ raise NotImplementedError("StoppingCriteria needs to be subclassed")
48
+
49
+
50
+ class MaxLengthCriteria(StoppingCriteria):
51
+ """
52
+ This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep
53
+ in mind for decoder-only type of transformers, this will include the initial prompted tokens.
54
+
55
+ Args:
56
+ max_length (`int`):
57
+ The maximum length that the output sequence can have in number of tokens.
58
+ max_position_embeddings (`int`, *optional*):
59
+ The maximum model length, as defined by the model's `config.max_position_embeddings` attribute.
60
+ """
61
+
62
+ def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
63
+ self.max_length = max_length
64
+ self.max_position_embeddings = max_position_embeddings
65
+
66
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
67
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
68
+ cur_len = input_ids.shape[-1]
69
+ is_done = cur_len >= self.max_length
70
+ if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
71
+ logger.warning_once(
72
+ "This is a friendly reminder - the current text generation call will exceed the model's predefined "
73
+ f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
74
+ "exceptions, performance degradation, or nothing at all."
75
+ )
76
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
77
+
78
+
79
+ class MaxNewTokensCriteria(StoppingCriteria):
80
+ """
81
+ This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in
82
+ mind for decoder-only type of transformers, this will **not** include the initial prompted tokens. This is very
83
+ close to `MaxLengthCriteria` but ignores the number of initial tokens.
84
+
85
+ Args:
86
+ start_length (`int`):
87
+ The number of initial tokens.
88
+ max_new_tokens (`int`):
89
+ The maximum number of tokens to generate.
90
+ """
91
+
92
+ def __init__(self, start_length: int, max_new_tokens: int):
93
+ warnings.warn(
94
+ "The class `MaxNewTokensCriteria` is deprecated. "
95
+ f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
96
+ "with `max_length = start_length + max_new_tokens` instead.",
97
+ FutureWarning,
98
+ )
99
+ self.start_length = start_length
100
+ self.max_new_tokens = max_new_tokens
101
+ self.max_length = start_length + max_new_tokens
102
+
103
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
104
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
105
+ is_done = input_ids.shape[-1] >= self.max_length
106
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
107
+
108
+
109
+ class MaxTimeCriteria(StoppingCriteria):
110
+ """
111
+ This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the
112
+ time will start being counted when you initialize this function. You can override this by passing an
113
+ `initial_time`.
114
+
115
+ Args:
116
+ max_time (`float`):
117
+ The maximum allowed time in seconds for the generation.
118
+ initial_timestamp (`float`, *optional*, defaults to `time.time()`):
119
+ The start time from which the allowed generation time is measured.
120
+ """
121
+
122
+ def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
123
+ self.max_time = max_time
124
+ self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
125
+
126
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
127
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
128
+ is_done = time.time() - self.initial_timestamp > self.max_time
129
+ return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool)
130
+
131
+
132
+ class EosTokenCriteria(StoppingCriteria):
133
+ """
134
+ This class can be used to stop generation whenever the "end-of-sequence" token is generated.
135
+ By default, it uses the `model.generation_config.eos_token_id`.
136
+
137
+ Args:
138
+ eos_token_id (`Union[int, List[int]]`):
139
+ The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
140
+ """
141
+
142
+ def __init__(self, eos_token_id: Union[int, List[int]]):
143
+ if isinstance(eos_token_id, int):
144
+ eos_token_id = [eos_token_id]
145
+ self.eos_token_id = torch.tensor(eos_token_id)
146
+
147
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
148
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
149
+ if input_ids.device.type == "mps":
150
+ # https://github.com/pytorch/pytorch/issues/77764#issuecomment-2067838075
151
+ is_done = (
152
+ input_ids[:, -1]
153
+ .tile(self.eos_token_id.shape[0], 1)
154
+ .eq(self.eos_token_id.unsqueeze(1).to(input_ids.device))
155
+ .sum(dim=0)
156
+ .bool()
157
+ .squeeze()
158
+ )
159
+ else:
160
+ is_done = torch.isin(input_ids[:, -1], self.eos_token_id.to(input_ids.device))
161
+ return is_done
162
+
163
+
164
+ class StoppingCriteriaList(list):
165
+ @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
166
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
167
+ is_done = torch.full((input_ids.shape[0],), False, device=input_ids.device)
168
+ for criteria in self:
169
+ is_done = is_done | criteria(input_ids, scores, **kwargs)
170
+ return is_done
171
+
172
+ @property
173
+ def max_length(self) -> Optional[int]:
174
+ for stopping_criterium in self:
175
+ if isinstance(stopping_criterium, MaxLengthCriteria):
176
+ return stopping_criterium.max_length
177
+ elif isinstance(stopping_criterium, MaxNewTokensCriteria):
178
+ return stopping_criterium.max_length
179
+ return None
180
+
181
+
182
+ def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
183
+ stopping_max_length = stopping_criteria.max_length
184
+ new_stopping_criteria = deepcopy(stopping_criteria)
185
+ if stopping_max_length is not None and stopping_max_length != max_length:
186
+ warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
187
+ elif stopping_max_length is None:
188
+ new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
189
+ return new_stopping_criteria
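To make the stopping-criteria API above concrete, here is a minimal usage sketch; the token ids, vocabulary size, and thresholds are made up for illustration, while the classes are the ones defined in this file.

```python
import torch

from transformers.generation.stopping_criteria import (
    EosTokenCriteria,
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

# Hypothetical already-generated ids for a batch of two sequences (eos_token_id == 2).
input_ids = torch.tensor([[5, 8, 13, 21, 34, 2], [5, 8, 13, 21, 34, 55]])
scores = torch.zeros(2, 100)  # dummy logits, only needed to satisfy the __call__ signature

criteria = StoppingCriteriaList(
    [
        MaxLengthCriteria(max_length=8),
        MaxTimeCriteria(max_time=30.0),
        EosTokenCriteria(eos_token_id=2),
    ]
)
print(criteria(input_ids, scores))  # tensor([ True, False]): only the first row ends with the EOS id
```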
llmeval-env/lib/python3.10/site-packages/transformers/generation/streamers.py ADDED
@@ -0,0 +1,227 @@
1
+ # coding=utf-8
2
+ # Copyright 2023 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from queue import Queue
17
+ from typing import TYPE_CHECKING, Optional
18
+
19
+
20
+ if TYPE_CHECKING:
21
+ from ..models.auto import AutoTokenizer
22
+
23
+
24
+ class BaseStreamer:
25
+ """
26
+ Base class from which `.generate()` streamers should inherit.
27
+ """
28
+
29
+ def put(self, value):
30
+ """Function that is called by `.generate()` to push new tokens"""
31
+ raise NotImplementedError()
32
+
33
+ def end(self):
34
+ """Function that is called by `.generate()` to signal the end of generation"""
35
+ raise NotImplementedError()
36
+
37
+
38
+ class TextStreamer(BaseStreamer):
39
+ """
40
+ Simple text streamer that prints the token(s) to stdout as soon as entire words are formed.
41
+
42
+ <Tip warning={true}>
43
+
44
+ The API for the streamer classes is still under development and may change in the future.
45
+
46
+ </Tip>
47
+
48
+ Parameters:
49
+ tokenizer (`AutoTokenizer`):
50
+ The tokenizer used to decode the tokens.
51
+ skip_prompt (`bool`, *optional*, defaults to `False`):
52
+ Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
53
+ decode_kwargs (`dict`, *optional*):
54
+ Additional keyword arguments to pass to the tokenizer's `decode` method.
55
+
56
+ Examples:
57
+
58
+ ```python
59
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer
60
+
61
+ >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
62
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
63
+ >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
64
+ >>> streamer = TextStreamer(tok)
65
+
66
+ >>> # Despite returning the usual output, the streamer will also print the generated text to stdout.
67
+ >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
68
+ An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,
69
+ ```
70
+ """
71
+
72
+ def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
73
+ self.tokenizer = tokenizer
74
+ self.skip_prompt = skip_prompt
75
+ self.decode_kwargs = decode_kwargs
76
+
77
+ # variables used in the streaming process
78
+ self.token_cache = []
79
+ self.print_len = 0
80
+ self.next_tokens_are_prompt = True
81
+
82
+ def put(self, value):
83
+ """
84
+ Receives tokens, decodes them, and prints them to stdout as soon as they form entire words.
85
+ """
86
+ if len(value.shape) > 1 and value.shape[0] > 1:
87
+ raise ValueError("TextStreamer only supports batch size 1")
88
+ elif len(value.shape) > 1:
89
+ value = value[0]
90
+
91
+ if self.skip_prompt and self.next_tokens_are_prompt:
92
+ self.next_tokens_are_prompt = False
93
+ return
94
+
95
+ # Add the new token to the cache and decodes the entire thing.
96
+ self.token_cache.extend(value.tolist())
97
+ text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
98
+
99
+ # After the symbol for a new line, we flush the cache.
100
+ if text.endswith("\n"):
101
+ printable_text = text[self.print_len :]
102
+ self.token_cache = []
103
+ self.print_len = 0
104
+ # If the last token is a CJK character, we print the characters.
105
+ elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
106
+ printable_text = text[self.print_len :]
107
+ self.print_len += len(printable_text)
108
+ # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
109
+ # which may change with the subsequent token -- there are probably smarter ways to do this!)
110
+ else:
111
+ printable_text = text[self.print_len : text.rfind(" ") + 1]
112
+ self.print_len += len(printable_text)
113
+
114
+ self.on_finalized_text(printable_text)
115
+
116
+ def end(self):
117
+ """Flushes any remaining cache and prints a newline to stdout."""
118
+ # Flush the cache, if it exists
119
+ if len(self.token_cache) > 0:
120
+ text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
121
+ printable_text = text[self.print_len :]
122
+ self.token_cache = []
123
+ self.print_len = 0
124
+ else:
125
+ printable_text = ""
126
+
127
+ self.next_tokens_are_prompt = True
128
+ self.on_finalized_text(printable_text, stream_end=True)
129
+
130
+ def on_finalized_text(self, text: str, stream_end: bool = False):
131
+ """Prints the new text to stdout. If the stream is ending, also prints a newline."""
132
+ print(text, flush=True, end="" if not stream_end else None)
133
+
134
+ def _is_chinese_char(self, cp):
135
+ """Checks whether CP is the codepoint of a CJK character."""
136
+ # This defines a "chinese character" as anything in the CJK Unicode block:
137
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
138
+ #
139
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
140
+ # despite its name. The modern Korean Hangul alphabet is a different block,
141
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
142
+ # space-separated words, so they are not treated specially and handled
143
+ like all of the other languages.
144
+ if (
145
+ (cp >= 0x4E00 and cp <= 0x9FFF)
146
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
147
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
148
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
149
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
150
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
151
+ or (cp >= 0xF900 and cp <= 0xFAFF)
152
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
153
+ ): #
154
+ return True
155
+
156
+ return False
157
+
158
+
159
+ class TextIteratorStreamer(TextStreamer):
160
+ """
161
+ Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is
162
+ useful for applications that benefit from accessing the generated text in a non-blocking way (e.g. in an interactive
163
+ Gradio demo).
164
+
165
+ <Tip warning={true}>
166
+
167
+ The API for the streamer classes is still under development and may change in the future.
168
+
169
+ </Tip>
170
+
171
+ Parameters:
172
+ tokenizer (`AutoTokenizer`):
173
+ The tokenizer used to decode the tokens.
174
+ skip_prompt (`bool`, *optional*, defaults to `False`):
175
+ Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots.
176
+ timeout (`float`, *optional*):
177
+ The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions
178
+ in `.generate()`, when it is called in a separate thread.
179
+ decode_kwargs (`dict`, *optional*):
180
+ Additional keyword arguments to pass to the tokenizer's `decode` method.
181
+
182
+ Examples:
183
+
184
+ ```python
185
+ >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer
186
+ >>> from threading import Thread
187
+
188
+ >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
189
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
190
+ >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt")
191
+ >>> streamer = TextIteratorStreamer(tok)
192
+
193
+ >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way.
194
+ >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20)
195
+ >>> thread = Thread(target=model.generate, kwargs=generation_kwargs)
196
+ >>> thread.start()
197
+ >>> generated_text = ""
198
+ >>> for new_text in streamer:
199
+ ... generated_text += new_text
200
+ >>> generated_text
201
+ 'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,'
202
+ ```
203
+ """
204
+
205
+ def __init__(
206
+ self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
207
+ ):
208
+ super().__init__(tokenizer, skip_prompt, **decode_kwargs)
209
+ self.text_queue = Queue()
210
+ self.stop_signal = None
211
+ self.timeout = timeout
212
+
213
+ def on_finalized_text(self, text: str, stream_end: bool = False):
214
+ """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
215
+ self.text_queue.put(text, timeout=self.timeout)
216
+ if stream_end:
217
+ self.text_queue.put(self.stop_signal, timeout=self.timeout)
218
+
219
+ def __iter__(self):
220
+ return self
221
+
222
+ def __next__(self):
223
+ value = self.text_queue.get(timeout=self.timeout)
224
+ if value == self.stop_signal:
225
+ raise StopIteration()
226
+ else:
227
+ return value
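`BaseStreamer` above only fixes the `put`/`end` contract used by `.generate()`; everything else is up to the subclass. As an illustration (not part of this diff), a hypothetical streamer that merely collects token ids could look like the following sketch.

```python
from transformers.generation.streamers import BaseStreamer


class TokenCollector(BaseStreamer):
    """Hypothetical streamer that records every token id pushed by `.generate()`."""

    def __init__(self):
        self.token_ids = []

    def put(self, value):
        # `value` is a tensor of new token ids (the prompt on the first call, when present).
        self.token_ids.extend(value.reshape(-1).tolist())

    def end(self):
        print(f"generation finished, collected {len(self.token_ids)} ids")


# Usage, assuming `model` and `inputs` are already defined:
#   model.generate(**inputs, streamer=TokenCollector())
```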
llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py ADDED
@@ -0,0 +1,591 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import inspect
17
+ from typing import List, Tuple
18
+
19
+ import numpy as np
20
+ import tensorflow as tf
21
+
22
+ from ..tf_utils import stable_softmax
23
+ from ..utils import add_start_docstrings
24
+ from ..utils.logging import get_logger
25
+
26
+
27
+ logger = get_logger(__name__)
28
+
29
+
30
+ TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
31
+ Args:
32
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
33
+ Indices of input sequence tokens in the vocabulary.
34
+
35
+ Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
36
+ [`PreTrainedTokenizer.__call__`] for details.
37
+
38
+ [What are input IDs?](../glossary#input-ids)
39
+ scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`):
40
+ Prediction scores of a language modeling head. These can be logits for each vocabulary token when not using
41
+ beam search or log softmax for each vocabulary token when using beam search.
42
+ cur_len (`int`):
43
+ The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length
44
+ is the maximum length generate can produce, and we need to know which of its tokens are valid.
45
+ kwargs (`Dict[str, Any]`, *optional*):
46
+ Additional logits processor specific kwargs.
47
+
48
+ Return:
49
+ `tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.
50
+ """
51
+
52
+
53
+ class TFLogitsProcessor:
54
+ """Abstract base class for all logit processors that can be applied during generation."""
55
+
56
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
57
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
58
+ """TF method for processing logits."""
59
+ raise NotImplementedError(
60
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
61
+ )
62
+
63
+
64
+ class TFLogitsWarper:
65
+ """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling."""
66
+
67
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
68
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
69
+ """TF method for warping logits."""
70
+ raise NotImplementedError(
71
+ f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
72
+ )
73
+
74
+
75
+ class TFLogitsProcessorList(list):
76
+ """
77
+ This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor.
78
+ This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the
79
+ inputs.
80
+ """
81
+
82
+ @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING)
83
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor:
84
+ for processor in self:
85
+ function_args = inspect.signature(processor.__call__).parameters
86
+ if len(function_args) > 3:
87
+ if not all(arg in kwargs for arg in list(function_args.keys())[2:]):
88
+ raise ValueError(
89
+ f"Make sure that all the required parameters: {list(function_args.keys())} for "
90
+ f"{processor.__class__} are passed to the logits processor."
91
+ )
92
+ scores = processor(input_ids, scores, cur_len, **kwargs)
93
+ else:
94
+ scores = processor(input_ids, scores, cur_len)
95
+ return scores
96
+
97
+
98
+ class TFTemperatureLogitsWarper(TFLogitsWarper):
99
+ r"""
100
+ [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution).
101
+
102
+ Args:
103
+ temperature (`float`):
104
+ The value used to module the logits distribution.
105
+ """
106
+
107
+ def __init__(self, temperature: float):
108
+ if not isinstance(temperature, float) or not (temperature > 0):
109
+ raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}")
110
+
111
+ self.temperature = temperature
112
+
113
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
114
+ scores = scores / self.temperature
115
+ return scores
116
+
117
+
118
+ class TFTopKLogitsWarper(TFLogitsWarper):
119
+ r"""
120
+ [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements.
121
+
122
+ Args:
123
+ top_k (`int`):
124
+ The number of highest probability vocabulary tokens to keep for top-k-filtering.
125
+ filter_value (`float`, *optional*, defaults to -inf):
126
+ All filtered values will be set to this float value.
127
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
128
+ Minimum number of tokens that cannot be filtered.
129
+ """
130
+
131
+ def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
132
+ if not isinstance(top_k, int) or top_k <= 0:
133
+ raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}")
134
+
135
+ self.top_k = max(top_k, min_tokens_to_keep)
136
+ self.filter_value = filter_value
137
+
138
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
139
+ top_k = min(self.top_k, scores.shape[-1]) # Safety check
140
+ # Boolean mask containing all tokens with a probability less than the last token of the top-k
141
+ indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:]
142
+ next_scores = tf.where(indices_to_remove, self.filter_value, scores)
143
+ return next_scores
144
+
145
+
146
+ class TFTopPLogitsWarper(TFLogitsWarper):
147
+ """
148
+ [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off.
149
+
150
+ Args:
151
+ top_p (`float`):
152
+ If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
153
+ higher are kept for generation.
154
+ filter_value (`float`, *optional*, defaults to -inf):
155
+ All filtered values will be set to this float value.
156
+ min_tokens_to_keep (`int`, *optional*, defaults to 1):
157
+ Minimum number of tokens that cannot be filtered.
158
+ """
159
+
160
+ def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1):
161
+ if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0):
162
+ raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}")
163
+ if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1):
164
+ raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}")
165
+
166
+ self.top_p = top_p
167
+ self.filter_value = filter_value
168
+ self.min_tokens_to_keep = min_tokens_to_keep
169
+
170
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
171
+ topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1])
172
+
173
+ mask_scores = tf.fill(scores.shape, self.filter_value)
174
+ cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1)
175
+ score_mask = cumulative_probs < self.top_p
176
+
177
+ # Also include the token that is higher than top_p (the first false = shift and insert a True on the left)
178
+ score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1)
179
+
180
+ # Ensure min tokens to keep
181
+ score_mask = tf.concat(
182
+ (
183
+ tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool),
184
+ score_mask[:, self.min_tokens_to_keep :],
185
+ ),
186
+ axis=-1,
187
+ )
188
+
189
+ # Mask the values that do not fit the criteria
190
+ topk_next_scores = tf.where(score_mask, topk_scores, mask_scores)
191
+
192
+ # Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size)
193
+ # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we
194
+ # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`)
195
+ scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]])
196
+ scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1)
197
+ next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape)
198
+
199
+ return next_scores
200
+
201
+
202
+ class TFMinLengthLogitsProcessor(TFLogitsProcessor):
203
+ r"""
204
+ [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0.
205
+
206
+ Args:
207
+ min_length (`int`):
208
+ The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`.
209
+ eos_token_id (`int`):
210
+ The id of the *end-of-sequence* token.
211
+ """
212
+
213
+ def __init__(self, min_length: int, eos_token_id: int):
214
+ if not isinstance(min_length, int) or min_length < 0:
215
+ raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}")
216
+
217
+ if not isinstance(eos_token_id, int) or eos_token_id < 0:
218
+ raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}")
219
+
220
+ self.min_length = min_length
221
+ self.eos_token_id = eos_token_id
222
+
223
+ def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor:
224
+ eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id
225
+ scores = tf.where(eos_token_id_mask, float("-inf"), scores)
226
+ return scores
227
+
228
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
229
+ # applies eos token masking if the first argument is true
230
+ scores = tf.cond(
231
+ tf.less(cur_len, self.min_length),
232
+ lambda: self._apply_eos_token_mask(scores),
233
+ lambda: tf.identity(scores),
234
+ )
235
+ return scores
236
+
237
+
238
+ class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor):
239
+ r"""
240
+ [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences.
241
+
242
+ Args:
243
+ repetition_penalty (`float`):
244
+ The parameter for repetition penalty. 1.0 means no penalty. See [this
245
+ paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
246
+ """
247
+
248
+ def __init__(self, penalty: float):
249
+ if not isinstance(penalty, float) or not (penalty > 0):
250
+ raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}")
251
+
252
+ self.penalty = penalty
253
+
254
+ def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor:
255
+ # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown
256
+ # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has
257
+ # the same token multiple times.
258
+
259
+ # Gathers the penalties to apply
260
+ logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1)
261
+ logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties)
262
+ logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties)
263
+
264
+ # Scatters the penalties
265
+ token_penalties = tf.ones(logits.shape)
266
+ batch_size = input_ids.shape[0]
267
+ seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape
268
+ indexable_prev_input_ids = tf.concat(
269
+ (
270
+ tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1),
271
+ tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1),
272
+ ),
273
+ axis=1,
274
+ )
275
+ token_penalties = tf.tensor_scatter_nd_update(
276
+ token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1])
277
+ )
278
+ return token_penalties
279
+
280
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
281
+ score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores)
282
+
283
+ scores = tf.math.multiply(scores, score_penalties)
284
+
285
+ return scores
286
+
287
+
288
+ class TFNoBadWordsLogitsProcessor(TFLogitsProcessor):
289
+ """
290
+ [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled.
291
+
292
+ Args:
293
+ bad_words_ids (`List[List[int]]`):
294
+ List of list of token ids that are not allowed to be generated. In order to get the tokens of the words
295
+ that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing
296
+ the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space`
297
+ argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from
298
+ `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers).
299
+ eos_token_id (`int`):
300
+ The id of the *end-of-sequence* token.
301
+ """
302
+
303
+ def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int):
304
+ if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0:
305
+ raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
306
+ if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
307
+ raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
308
+ if any(
309
+ any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
310
+ for bad_word_ids in bad_words_ids
311
+ ):
312
+ raise ValueError(
313
+ f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
314
+ )
315
+
316
+ # stores the information about bad words in three tensors:
317
+ # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons
318
+ self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1)
319
+ # 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons
320
+ bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids]
321
+ if any(word_len == 0 for word_len in bad_word_seqs_len):
322
+ raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list")
323
+ self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32)
324
+ # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned
325
+ self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids])
326
+
327
+ def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor:
328
+ def _tokens_match(bad_word_seq_number):
329
+ def _len_one():
330
+ # If the bad sequence only has one token, always mask it
331
+ return tf.cond(
332
+ tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1),
333
+ lambda: tf.ones((), dtype=tf.bool),
334
+ _len_greater_than_cur_len,
335
+ )
336
+
337
+ def _len_greater_than_cur_len():
338
+ # Otherwise, if the bad sequence is longer than the current length they can't ever match
339
+ return tf.cond(
340
+ tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]),
341
+ lambda: tf.zeros((), dtype=tf.bool),
342
+ _match_found,
343
+ )
344
+
345
+ def _match_found():
346
+ # Finally, runs the actual comparison. Can only be called if the previous comparisons do not yield
347
+ # an answer (otherwise we get indexing exceptions)
348
+ compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1
349
+ return tf.cond(
350
+ tf.math.reduce_all(
351
+ tf.math.equal(
352
+ row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len]
353
+ )
354
+ ),
355
+ lambda: tf.ones((), dtype=tf.bool),
356
+ lambda: tf.zeros((), dtype=tf.bool),
357
+ )
358
+
359
+ match = _len_one()
360
+ return match
361
+
362
+ # Compares the current row against all bad word sequences, obtaining a mask with the matches.
363
+ match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool)
364
+ row_banned_tokens = self.seq_forbidden_tokens[match_mask]
365
+ return row_banned_tokens
366
+
367
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
368
+ # We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous
369
+ # `input_ids`, they may have a different length for each row, and they may even be empty for some rows.
370
+ # To remain simple and XLA-compatible, we work on a per-row fashion.
371
+ # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes
372
+ # a frequent choke point. (make `cur_len` a tensor?)
373
+ def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor:
374
+ row_input_ids, row_score = row_inputs
375
+ banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len])
376
+ banned_tokens_mask = tf.scatter_nd(
377
+ indices=tf.expand_dims(banned_tokens, axis=-1),
378
+ updates=tf.ones_like(banned_tokens, dtype=tf.bool),
379
+ shape=row_score.shape,
380
+ )
381
+ row_score = tf.where(banned_tokens_mask, -float("inf"), row_score)
382
+ return row_score
383
+
384
+ scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32)
385
+ return scores
386
+
387
+
388
+ class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor):
389
+ r"""
390
+ [`TFLogitsProcessor`] that enforces no repetition of n-grams. See
391
+ [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345).
392
+
393
+ Args:
394
+ ngram_size (`int`):
395
+ All ngrams of size `ngram_size` can only occur once.
396
+ """
397
+
398
+ def __init__(self, ngram_size: int):
399
+ if not isinstance(ngram_size, int) or ngram_size <= 0:
400
+ raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}")
401
+ self.ngram_size = ngram_size
402
+
403
+ def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len):
404
+ # Copied from fairseq for no_repeat_ngram in beam_search
405
+ if cur_len + 1 < self.ngram_size:
406
+ # return no banned tokens if we haven't generated ngram_size tokens yet
407
+ return [[] for _ in range(num_hypos)]
408
+ generated_ngrams = [{} for _ in range(num_hypos)]
409
+ prev_input_ids = input_ids[:, :cur_len]
410
+ for idx in range(num_hypos):
411
+ gen_tokens = prev_input_ids[idx].numpy().tolist()
412
+ generated_ngram = generated_ngrams[idx]
413
+ for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]):
414
+ prev_ngram_tuple = tuple(ngram[:-1])
415
+ generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]]
416
+
417
+ def _get_generated_ngrams(hypo_idx):
418
+ # Before decoding the next token, prevent decoding of ngrams that have already appeared
419
+ start_idx = cur_len + 1 - self.ngram_size
420
+ ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist())
421
+ return generated_ngrams[hypo_idx].get(ngram_idx, [])
422
+
423
+ banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)]
424
+
425
+ return banned_tokens
426
+
427
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
428
+ # TODO (joao): enable XLA on this logits processor. See discussion and attempts in
429
+ # https://github.com/huggingface/transformers/pull/16974
430
+ if not tf.executing_eagerly():
431
+ raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.")
432
+
433
+ batch_size, vocab_size = scores.shape
434
+ banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len)
435
+
436
+ # create banned_tokens boolean mask
437
+ banned_tokens_indices_mask = []
438
+ for banned_tokens_slice in banned_tokens:
439
+ banned_tokens_indices_mask.append(
440
+ [True if token in banned_tokens_slice else False for token in range(vocab_size)]
441
+ )
442
+
443
+ scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores)
444
+
445
+ return scores
446
+
447
+
448
+ class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor):
449
+ r"""
450
+ [`TFLogitsProcessor`] that enforces the specified token as the first generated token.
451
+
452
+ Args:
453
+ bos_token_id (`int`):
454
+ The id of the token to force as the first generated token.
455
+ """
456
+
457
+ def __init__(self, bos_token_id: int):
458
+ if bos_token_id < 0:
459
+ raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}")
460
+ self.bos_token_id = bos_token_id
461
+
462
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
463
+ if cur_len == 1:
464
+ batch_size, num_tokens = scores.shape
465
+ # sets the score to 0 in the bos_token_id column
466
+ scores = tf.zeros((batch_size, 1))
467
+ # sets the score to -inf everywhere else
468
+ if self.bos_token_id > 0:
469
+ scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1)
470
+ if self.bos_token_id < (num_tokens - 1):
471
+ scores = tf.concat(
472
+ (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))),
473
+ axis=-1,
474
+ )
475
+ return scores
476
+
477
+
478
+ class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor):
479
+ r"""
480
+ [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached.
481
+
482
+ Args:
483
+ max_length (`int`):
484
+ The maximum length of the sequence to be generated.
485
+ eos_token_id (`int`):
486
+ The id of the token to force as the last generated token when `max_length` is reached.
487
+ """
488
+
489
+ def __init__(self, max_length: int, eos_token_id: int):
490
+ self.max_length = max_length
491
+ if eos_token_id < 0:
492
+ raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}")
493
+ self.eos_token_id = eos_token_id
494
+
495
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
496
+ if cur_len == self.max_length - 1:
497
+ batch_size, num_tokens = scores.shape
498
+ # sets the score to 0 in the eos_token_id column
499
+ scores = tf.zeros((batch_size, 1))
500
+ # sets the score to -inf everywhere else
501
+ if self.eos_token_id > 0:
502
+ scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1)
503
+ if self.eos_token_id < (num_tokens - 1):
504
+ scores = tf.concat(
505
+ (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))),
506
+ axis=-1,
507
+ )
508
+ return scores
509
+
510
+
511
+ class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor):
512
+ r"""
513
+ [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
514
+ generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not
516
+ sampled at the beginning of the generation.
516
+ """
517
+
518
+ def __init__(self, begin_suppress_tokens, begin_index):
519
+ self.begin_suppress_tokens = list(begin_suppress_tokens)
520
+ self.begin_index = begin_index
521
+
522
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
523
+ scores = tf.cond(
524
+ tf.equal(cur_len, self.begin_index),
525
+ lambda: tf.tensor_scatter_nd_update(
526
+ scores,
527
+ indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens],
528
+ updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))],
529
+ ),
530
+ lambda: scores,
531
+ )
532
+ return scores
533
+
534
+
535
+ class TFSuppressTokensLogitsProcessor(TFLogitsProcessor):
536
+ r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they
537
+ are not sampled."""
538
+
539
+ def __init__(self, suppress_tokens):
540
+ self.suppress_tokens = list(suppress_tokens)
541
+
542
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
543
+ scores = tf.tensor_scatter_nd_update(
544
+ scores,
545
+ indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens],
546
+ updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))],
547
+ )
548
+ return scores
549
+
550
+
551
+ class TFForceTokensLogitsProcessor(TFLogitsProcessor):
552
+ r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token
553
+ indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to
554
+ `-inf` so that they are sampled at their corresponding index."""
555
+
556
+ def __init__(self, force_token_map: List[List[int]]):
557
+ force_token_map = dict(force_token_map)
558
+ # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
559
+ # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
560
+ # Indexes without forced tokens will have an negative value.
561
+ force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1
562
+ for index, token in force_token_map.items():
563
+ if token is not None:
564
+ force_token_array[index] = token
565
+ self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32)
566
+
567
+ def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor:
568
+ def _force_token(generation_idx):
569
+ batch_size = scores.shape[0]
570
+ current_token = self.force_token_array[generation_idx]
571
+
572
+ new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf")
573
+ indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1)
574
+ updates = tf.zeros((batch_size,), dtype=scores.dtype)
575
+ new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates)
576
+ return new_scores
577
+
578
+ scores = tf.cond(
579
+ tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]),
580
+ # If the current length is geq than the length of force_token_array, the processor does nothing.
581
+ lambda: tf.identity(scores),
582
+ # Otherwise, it may force a certain token.
583
+ lambda: tf.cond(
584
+ tf.greater_equal(self.force_token_array[cur_len], 0),
585
+ # Only valid (positive) tokens are forced
586
+ lambda: _force_token(cur_len),
587
+ # Otherwise, the processor does nothing.
588
+ lambda: scores,
589
+ ),
590
+ )
591
+ return scores
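As with the PyTorch processors, the TF warpers above are meant to be chained through `TFLogitsProcessorList`. A minimal sketch follows (random scores and a tiny made-up vocabulary, purely for illustration).

```python
import tensorflow as tf

from transformers.generation.tf_logits_process import (
    TFLogitsProcessorList,
    TFTemperatureLogitsWarper,
    TFTopKLogitsWarper,
)

# Hypothetical batch of one sequence over a 10-token vocabulary.
input_ids = tf.constant([[3, 7, 1]])
scores = tf.random.normal((1, 10))

warpers = TFLogitsProcessorList([TFTemperatureLogitsWarper(0.7), TFTopKLogitsWarper(top_k=3)])
warped = warpers(input_ids, scores, cur_len=3)
print(warped)  # every entry outside the top 3 tokens has been set to -inf
```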
llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_utils.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/generation/utils.py ADDED
The diff for this file is too large to render. See raw diff
 
llmeval-env/lib/python3.10/site-packages/transformers/onnx/__init__.py ADDED
@@ -0,0 +1,49 @@
1
+ # Copyright 2020 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ from typing import TYPE_CHECKING
16
+
17
+ from ..utils import _LazyModule
18
+
19
+
20
+ _import_structure = {
21
+ "config": [
22
+ "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
23
+ "OnnxConfig",
24
+ "OnnxConfigWithPast",
25
+ "OnnxSeq2SeqConfigWithPast",
26
+ "PatchingSpec",
27
+ ],
28
+ "convert": ["export", "validate_model_outputs"],
29
+ "features": ["FeaturesManager"],
30
+ "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
31
+ }
32
+
33
+
34
+ if TYPE_CHECKING:
35
+ from .config import (
36
+ EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
37
+ OnnxConfig,
38
+ OnnxConfigWithPast,
39
+ OnnxSeq2SeqConfigWithPast,
40
+ PatchingSpec,
41
+ )
42
+ from .convert import export, validate_model_outputs
43
+ from .features import FeaturesManager
44
+ from .utils import ParameterFormat, compute_serialized_parameters_size
45
+
46
+ else:
47
+ import sys
48
+
49
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
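Because the module replaces itself with a `_LazyModule`, the heavier submodules (`config`, `convert`, `features`, `utils`) are only imported on first attribute access. A hypothetical session illustrating the effect:

```python
# Hypothetical interactive session; nothing below is part of the diff itself.
import transformers.onnx as t_onnx

print(type(t_onnx).__name__)    # _LazyModule: the package object is the lazy proxy
config_cls = t_onnx.OnnxConfig  # first attribute access triggers the import of transformers.onnx.config
print(config_cls.__module__)    # transformers.onnx.config
```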
llmeval-env/lib/python3.10/site-packages/transformers/onnx/__main__.py ADDED
@@ -0,0 +1,242 @@
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import subprocess
+ import sys
+ import warnings
+ from argparse import ArgumentParser
+ from pathlib import Path
+
+ from packaging import version
+
+ from .. import AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer
+ from ..utils import logging
+ from ..utils.import_utils import is_optimum_available
+ from .convert import export, validate_model_outputs
+ from .features import FeaturesManager
+ from .utils import get_preprocessor
+
+
+ MIN_OPTIMUM_VERSION = "1.5.0"
+
+ ENCODER_DECODER_MODELS = ["vision-encoder-decoder"]
+
+
+ def export_with_optimum(args):
+     if is_optimum_available():
+         from optimum.version import __version__ as optimum_version
+
+         parsed_optimum_version = version.parse(optimum_version)
+         if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION):
+             raise RuntimeError(
+                 f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You "
+                 "can upgrade optimum by running: pip install -U optimum[exporters]"
+             )
+     else:
+         raise RuntimeError(
+             "transformers.onnx requires optimum to run, you can install the library by running: pip install "
+             "optimum[exporters]"
+         )
+     cmd_line = [
+         sys.executable,
+         "-m",
+         "optimum.exporters.onnx",
+         f"--model {args.model}",
+         f"--task {args.feature}",
+         f"--framework {args.framework}" if args.framework is not None else "",
+         f"{args.output}",
+     ]
+     proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE)
+     proc.wait()
+
+     logger.info(
+         "The export was done by optimum.exporters.onnx. We recommend using this package directly in the future, as "
+         "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: "
+         "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model."
+     )
+
+
+ def export_with_transformers(args):
+     args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx")
+     if not args.output.parent.exists():
+         args.output.parent.mkdir(parents=True)
+
+     # Allocate the model
+     model = FeaturesManager.get_model_from_feature(
+         args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir
+     )
+
+     model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature)
+     onnx_config = model_onnx_config(model.config)
+
+     if model_kind in ENCODER_DECODER_MODELS:
+         encoder_model = model.get_encoder()
+         decoder_model = model.get_decoder()
+
+         encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config)
+         decoder_onnx_config = onnx_config.get_decoder_config(
+             encoder_model.config, decoder_model.config, feature=args.feature
+         )
+
+         if args.opset is None:
+             args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)
+
+         if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset):
+             raise ValueError(
+                 f"Opset {args.opset} is not sufficient to export {model_kind}. At least "
+                 f"{min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required."
+             )
+
+         preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
+
+         onnx_inputs, onnx_outputs = export(
+             preprocessor,
+             encoder_model,
+             encoder_onnx_config,
+             args.opset,
+             args.output.parent.joinpath("encoder_model.onnx"),
+         )
+
+         validate_model_outputs(
+             encoder_onnx_config,
+             preprocessor,
+             encoder_model,
+             args.output.parent.joinpath("encoder_model.onnx"),
+             onnx_outputs,
+             args.atol if args.atol else encoder_onnx_config.atol_for_validation,
+         )
+
+         preprocessor = AutoTokenizer.from_pretrained(args.model)
+
+         onnx_inputs, onnx_outputs = export(
+             preprocessor,
+             decoder_model,
+             decoder_onnx_config,
+             args.opset,
+             args.output.parent.joinpath("decoder_model.onnx"),
+         )
+
+         validate_model_outputs(
+             decoder_onnx_config,
+             preprocessor,
+             decoder_model,
+             args.output.parent.joinpath("decoder_model.onnx"),
+             onnx_outputs,
+             args.atol if args.atol else decoder_onnx_config.atol_for_validation,
+         )
+         logger.info(
+             f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()},"
+             f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}"
+         )
+
+     else:
+         # Instantiate the appropriate preprocessor
+         if args.preprocessor == "auto":
+             preprocessor = get_preprocessor(args.model)
+         elif args.preprocessor == "tokenizer":
+             preprocessor = AutoTokenizer.from_pretrained(args.model)
+         elif args.preprocessor == "image_processor":
+             preprocessor = AutoImageProcessor.from_pretrained(args.model)
+         elif args.preprocessor == "feature_extractor":
+             preprocessor = AutoFeatureExtractor.from_pretrained(args.model)
+         elif args.preprocessor == "processor":
+             preprocessor = AutoProcessor.from_pretrained(args.model)
+         else:
+             raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'")
+
+         # Ensure the requested opset is sufficient
+         if args.opset is None:
+             args.opset = onnx_config.default_onnx_opset
+
+         if args.opset < onnx_config.default_onnx_opset:
+             raise ValueError(
+                 f"Opset {args.opset} is not sufficient to export {model_kind}. "
+                 f"At least {onnx_config.default_onnx_opset} is required."
+             )
+
+         onnx_inputs, onnx_outputs = export(
+             preprocessor,
+             model,
+             onnx_config,
+             args.opset,
+             args.output,
+         )
+
+         if args.atol is None:
+             args.atol = onnx_config.atol_for_validation
+
+         validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol)
+         logger.info(f"All good, model saved at: {args.output.as_posix()}")
+     warnings.warn(
+         "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend"
+         " using optimum.exporters.onnx in future. You can find more information here:"
+         " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.",
+         FutureWarning,
+     )
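For orientation, the non-encoder-decoder path above maps onto a short programmatic equivalent using the same helpers. This is a minimal sketch; the checkpoint id and output path below are placeholders chosen for illustration, not values taken from this file:

    from pathlib import Path

    from transformers.onnx import FeaturesManager, export, validate_model_outputs
    from transformers.onnx.utils import get_preprocessor

    model_id = "distilbert-base-uncased"  # placeholder checkpoint id
    output = Path("onnx/model.onnx")      # placeholder output path
    output.parent.mkdir(parents=True, exist_ok=True)

    # Resolve the model, its ONNX config and a matching preprocessor for the "default" feature
    model = FeaturesManager.get_model_from_feature("default", model_id)
    model_kind, onnx_config_cls = FeaturesManager.check_supported_model_or_raise(model, feature="default")
    onnx_config = onnx_config_cls(model.config)
    preprocessor = get_preprocessor(model_id)

    # Export, then check the ONNX outputs against the reference model within the default tolerance
    onnx_inputs, onnx_outputs = export(preprocessor, model, onnx_config, onnx_config.default_onnx_opset, output)
    validate_model_outputs(onnx_config, preprocessor, model, output, onnx_outputs, onnx_config.atol_for_validation)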
+
+
+ def main():
+     parser = ArgumentParser("Hugging Face Transformers ONNX exporter")
+     parser.add_argument(
+         "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from."
+     )
+     parser.add_argument(
+         "--feature",
+         default="default",
+         help="The type of features to export the model with.",
+     )
+     parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.")
+     parser.add_argument(
+         "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model."
+     )
+     parser.add_argument(
+         "--framework",
+         type=str,
+         choices=["pt", "tf"],
+         default=None,
+         help=(
+             "The framework to use for the ONNX export."
+             " If not provided, will attempt to use the local checkpoint's original framework"
+             " or what is available in the environment."
+         ),
+     )
+     parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.")
+     parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.")
+     parser.add_argument(
+         "--preprocessor",
+         type=str,
+         choices=["auto", "tokenizer", "feature_extractor", "image_processor", "processor"],
+         default="auto",
+         help="Which type of preprocessor to use. 'auto' tries to automatically detect it.",
+     )
+     parser.add_argument(
+         "--export_with_transformers",
+         action="store_true",
+         help=(
+             "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be "
+             "useful when exporting a model supported in transformers but not in optimum, otherwise it is not "
+             "recommended."
+         ),
+     )
+
+     args = parser.parse_args()
+     if args.export_with_transformers or not is_optimum_available():
+         export_with_transformers(args)
+     else:
+         export_with_optimum(args)
+
+
+ if __name__ == "__main__":
+     logger = logging.get_logger("transformers.onnx")  # pylint: disable=invalid-name
+     logger.setLevel(logging.INFO)
+     main()
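With the guard above, running the file as a module is the intended entry point. A typical invocation, with the checkpoint id purely illustrative, is `python -m transformers.onnx --model=distilbert-base-uncased --feature=default onnx/`: it configures the `transformers.onnx` logger, then dispatches to `export_with_optimum` when optimum is installed, and to `export_with_transformers` otherwise or when `--export_with_transformers` is passed.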
llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (880 Bytes).
 
llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (5.89 kB).