diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bd05bf37d744428d32a5a163fcc490b25699c828
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/__init__.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1d147a97d485d60f7e22b94e5cf89d34ae95d2d
Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/__pycache__/data_collator.cpython-310.pyc differ
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..378894ab4bbb4704b67b1de4ab512f145b889d46
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from .glue import GlueDataset, GlueDataTrainingArguments +from .language_modeling import ( + LineByLineTextDataset, + LineByLineWithRefDataset, + LineByLineWithSOPTextDataset, + TextDataset, + TextDatasetForNextSentencePrediction, +) +from .squad import SquadDataset, SquadDataTrainingArguments diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..077ad88cf66e84442c42bf44b0c1a2ba4eccf4f4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a430ccc1cc0fa22b4858d72e37a4c86b427d8db Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/glue.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33008c6d72c263b5ae2ae219ee95a3dcdb8fc19b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/datasets/__pycache__/language_modeling.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ebd0d17aa55bb4529820ce347f6275d38f6c0caa --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__init__.py @@ -0,0 +1,98 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from ...utils import is_sklearn_available, requires_backends + + +if is_sklearn_available(): + from scipy.stats import pearsonr, spearmanr + from sklearn.metrics import f1_score, matthews_corrcoef + + +DEPRECATION_WARNING = ( + "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate " + "library. 
You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" +) + + +def simple_accuracy(preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(simple_accuracy, "sklearn") + return (preds == labels).mean() + + +def acc_and_f1(preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(acc_and_f1, "sklearn") + acc = simple_accuracy(preds, labels) + f1 = f1_score(y_true=labels, y_pred=preds) + return { + "acc": acc, + "f1": f1, + "acc_and_f1": (acc + f1) / 2, + } + + +def pearson_and_spearman(preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(pearson_and_spearman, "sklearn") + pearson_corr = pearsonr(preds, labels)[0] + spearman_corr = spearmanr(preds, labels)[0] + return { + "pearson": pearson_corr, + "spearmanr": spearman_corr, + "corr": (pearson_corr + spearman_corr) / 2, + } + + +def glue_compute_metrics(task_name, preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(glue_compute_metrics, "sklearn") + assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}" + if task_name == "cola": + return {"mcc": matthews_corrcoef(labels, preds)} + elif task_name == "sst-2": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "mrpc": + return acc_and_f1(preds, labels) + elif task_name == "sts-b": + return pearson_and_spearman(preds, labels) + elif task_name == "qqp": + return acc_and_f1(preds, labels) + elif task_name == "mnli": + return {"mnli/acc": simple_accuracy(preds, labels)} + elif task_name == "mnli-mm": + return {"mnli-mm/acc": simple_accuracy(preds, labels)} + elif task_name == "qnli": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "rte": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "wnli": + return {"acc": simple_accuracy(preds, labels)} + elif task_name == "hans": + return {"acc": simple_accuracy(preds, labels)} + else: + raise KeyError(task_name) + + +def xnli_compute_metrics(task_name, preds, labels): + warnings.warn(DEPRECATION_WARNING, FutureWarning) + requires_backends(xnli_compute_metrics, "sklearn") + if len(preds) != len(labels): + raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}") + if task_name == "xnli": + return {"acc": simple_accuracy(preds, labels)} + else: + raise KeyError(task_name) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7eb6d6774b3c725cf4bd1aeac3d0ec3240cedf4a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0f9621e817a45026d1a5ab028dd4629bd35e295 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/__pycache__/squad_metrics.cpython-310.pyc differ diff --git 
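A minimal usage sketch for the deprecated metric helpers above. It assumes scikit-learn and scipy are installed (the module only imports them when `is_sklearn_available()` is true), and the prediction/label arrays are toy values.

```python
import numpy as np

from transformers.data.metrics import glue_compute_metrics, xnli_compute_metrics

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])

# MRPC reports accuracy, F1 and their average; SST-2 and XNLI report accuracy only.
print(glue_compute_metrics("mrpc", preds, labels))   # {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}
print(glue_compute_metrics("sst-2", preds, labels))  # {'acc': 0.75}
print(xnli_compute_metrics("xnli", preds, labels))   # {'acc': 0.75}
```

Each call emits a `FutureWarning`, since the supported path is the 🤗 Evaluate library referenced in `DEPRECATION_WARNING`.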
a/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..6eea34ad9e81f470c4538189e27ce3e0ab925505 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/metrics/squad_metrics.py @@ -0,0 +1,780 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Very heavily inspired by the official evaluation script for SQuAD version 2.0 which was modified by XLNet authors to +update `find_best_threshold` scripts for SQuAD V2.0 + +In addition to basic functionality, we also compute additional statistics and plot precision-recall curves if an +additional na_prob.json file is provided. This file is expected to map question ID's to the model's predicted +probability that a question is unanswerable. +""" + + +import collections +import json +import math +import re +import string + +from ...models.bert import BasicTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +def normalize_answer(s): + """Lower text and remove punctuation, articles and extra whitespace.""" + + def remove_articles(text): + regex = re.compile(r"\b(a|an|the)\b", re.UNICODE) + return re.sub(regex, " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def get_tokens(s): + if not s: + return [] + return normalize_answer(s).split() + + +def compute_exact(a_gold, a_pred): + return int(normalize_answer(a_gold) == normalize_answer(a_pred)) + + +def compute_f1(a_gold, a_pred): + gold_toks = get_tokens(a_gold) + pred_toks = get_tokens(a_pred) + common = collections.Counter(gold_toks) & collections.Counter(pred_toks) + num_same = sum(common.values()) + if len(gold_toks) == 0 or len(pred_toks) == 0: + # If either is no-answer, then F1 is 1 if they agree, 0 otherwise + return int(gold_toks == pred_toks) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(pred_toks) + recall = 1.0 * num_same / len(gold_toks) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + + +def get_raw_scores(examples, preds): + """ + Computes the exact and f1 scores from the examples and the model predictions + """ + exact_scores = {} + f1_scores = {} + + for example in examples: + qas_id = example.qas_id + gold_answers = [answer["text"] for answer in example.answers if normalize_answer(answer["text"])] + + if not gold_answers: + # For unanswerable questions, only correct answer is empty string + gold_answers = [""] + + if qas_id not in preds: + print(f"Missing prediction for {qas_id}") + continue + + prediction = preds[qas_id] + exact_scores[qas_id] = max(compute_exact(a, prediction) for a in 
gold_answers) + f1_scores[qas_id] = max(compute_f1(a, prediction) for a in gold_answers) + + return exact_scores, f1_scores + + +def apply_no_ans_threshold(scores, na_probs, qid_to_has_ans, na_prob_thresh): + new_scores = {} + for qid, s in scores.items(): + pred_na = na_probs[qid] > na_prob_thresh + if pred_na: + new_scores[qid] = float(not qid_to_has_ans[qid]) + else: + new_scores[qid] = s + return new_scores + + +def make_eval_dict(exact_scores, f1_scores, qid_list=None): + if not qid_list: + total = len(exact_scores) + return collections.OrderedDict( + [ + ("exact", 100.0 * sum(exact_scores.values()) / total), + ("f1", 100.0 * sum(f1_scores.values()) / total), + ("total", total), + ] + ) + else: + total = len(qid_list) + return collections.OrderedDict( + [ + ("exact", 100.0 * sum(exact_scores[k] for k in qid_list) / total), + ("f1", 100.0 * sum(f1_scores[k] for k in qid_list) / total), + ("total", total), + ] + ) + + +def merge_eval(main_eval, new_eval, prefix): + for k in new_eval: + main_eval[f"{prefix}_{k}"] = new_eval[k] + + +def find_best_thresh_v2(preds, scores, na_probs, qid_to_has_ans): + num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) + cur_score = num_no_ans + best_score = cur_score + best_thresh = 0.0 + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + for i, qid in enumerate(qid_list): + if qid not in scores: + continue + if qid_to_has_ans[qid]: + diff = scores[qid] + else: + if preds[qid]: + diff = -1 + else: + diff = 0 + cur_score += diff + if cur_score > best_score: + best_score = cur_score + best_thresh = na_probs[qid] + + has_ans_score, has_ans_cnt = 0, 0 + for qid in qid_list: + if not qid_to_has_ans[qid]: + continue + has_ans_cnt += 1 + + if qid not in scores: + continue + has_ans_score += scores[qid] + + return 100.0 * best_score / len(scores), best_thresh, 1.0 * has_ans_score / has_ans_cnt + + +def find_all_best_thresh_v2(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): + best_exact, exact_thresh, has_ans_exact = find_best_thresh_v2(preds, exact_raw, na_probs, qid_to_has_ans) + best_f1, f1_thresh, has_ans_f1 = find_best_thresh_v2(preds, f1_raw, na_probs, qid_to_has_ans) + main_eval["best_exact"] = best_exact + main_eval["best_exact_thresh"] = exact_thresh + main_eval["best_f1"] = best_f1 + main_eval["best_f1_thresh"] = f1_thresh + main_eval["has_ans_exact"] = has_ans_exact + main_eval["has_ans_f1"] = has_ans_f1 + + +def find_best_thresh(preds, scores, na_probs, qid_to_has_ans): + num_no_ans = sum(1 for k in qid_to_has_ans if not qid_to_has_ans[k]) + cur_score = num_no_ans + best_score = cur_score + best_thresh = 0.0 + qid_list = sorted(na_probs, key=lambda k: na_probs[k]) + for _, qid in enumerate(qid_list): + if qid not in scores: + continue + if qid_to_has_ans[qid]: + diff = scores[qid] + else: + if preds[qid]: + diff = -1 + else: + diff = 0 + cur_score += diff + if cur_score > best_score: + best_score = cur_score + best_thresh = na_probs[qid] + return 100.0 * best_score / len(scores), best_thresh + + +def find_all_best_thresh(main_eval, preds, exact_raw, f1_raw, na_probs, qid_to_has_ans): + best_exact, exact_thresh = find_best_thresh(preds, exact_raw, na_probs, qid_to_has_ans) + best_f1, f1_thresh = find_best_thresh(preds, f1_raw, na_probs, qid_to_has_ans) + + main_eval["best_exact"] = best_exact + main_eval["best_exact_thresh"] = exact_thresh + main_eval["best_f1"] = best_f1 + main_eval["best_f1_thresh"] = f1_thresh + + +def squad_evaluate(examples, preds, no_answer_probs=None, no_answer_probability_threshold=1.0): + 
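A short sketch of the answer-normalization and scoring helpers defined above. The gold/predicted strings are invented and mirror the "Steve Smith's" example used later in the `get_final_text` comments.

```python
from transformers.data.metrics.squad_metrics import compute_exact, compute_f1, normalize_answer

gold = "Steve Smith's"
pred = "steve smith"

# Normalization lowercases, strips punctuation and articles, and collapses whitespace.
print(normalize_answer(gold))     # 'steve smiths'

# Exact match compares normalized strings; F1 compares bags of normalized tokens.
print(compute_exact(gold, pred))  # 0   ('steve smiths' != 'steve smith')
print(compute_f1(gold, pred))     # 0.5 (one shared token out of two on each side)
```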
qas_id_to_has_answer = {example.qas_id: bool(example.answers) for example in examples} + has_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if has_answer] + no_answer_qids = [qas_id for qas_id, has_answer in qas_id_to_has_answer.items() if not has_answer] + + if no_answer_probs is None: + no_answer_probs = {k: 0.0 for k in preds} + + exact, f1 = get_raw_scores(examples, preds) + + exact_threshold = apply_no_ans_threshold( + exact, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold + ) + f1_threshold = apply_no_ans_threshold(f1, no_answer_probs, qas_id_to_has_answer, no_answer_probability_threshold) + + evaluation = make_eval_dict(exact_threshold, f1_threshold) + + if has_answer_qids: + has_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=has_answer_qids) + merge_eval(evaluation, has_ans_eval, "HasAns") + + if no_answer_qids: + no_ans_eval = make_eval_dict(exact_threshold, f1_threshold, qid_list=no_answer_qids) + merge_eval(evaluation, no_ans_eval, "NoAns") + + if no_answer_probs: + find_all_best_thresh(evaluation, preds, exact, f1, no_answer_probs, qas_id_to_has_answer) + + return evaluation + + +def get_final_text(pred_text, orig_text, do_lower_case, verbose_logging=False): + """Project the tokenized prediction back to the original text.""" + + # When we created the data, we kept track of the alignment between original + # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So + # now `orig_text` contains the span of our original text corresponding to the + # span that we predicted. + # + # However, `orig_text` may contain extra characters that we don't want in + # our prediction. + # + # For example, let's say: + # pred_text = steve smith + # orig_text = Steve Smith's + # + # We don't want to return `orig_text` because it contains the extra "'s". + # + # We don't want to return `pred_text` because it's already been normalized + # (the SQuAD eval script also does punctuation stripping/lower casing but + # our tokenizer does additional normalization like stripping accent + # characters). + # + # What we really want to return is "Steve Smith". + # + # Therefore, we have to apply a semi-complicated alignment heuristic between + # `pred_text` and `orig_text` to get a character-to-character alignment. This + # can fail in certain cases in which case we just return `orig_text`. + + def _strip_spaces(text): + ns_chars = [] + ns_to_s_map = collections.OrderedDict() + for i, c in enumerate(text): + if c == " ": + continue + ns_to_s_map[len(ns_chars)] = i + ns_chars.append(c) + ns_text = "".join(ns_chars) + return (ns_text, ns_to_s_map) + + # We first tokenize `orig_text`, strip whitespace from the result + # and `pred_text`, and check if they are the same length. If they are + # NOT the same length, the heuristic has failed. If they are the same + # length, we assume the characters are one-to-one aligned. 
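An end-to-end sketch of `squad_evaluate` on toy data. The real pipeline passes `SquadExample` objects; `SimpleNamespace` stand-ins are used here only because the function reads nothing beyond `qas_id` and `answers`, and all ids, answers and probabilities below are invented.

```python
from types import SimpleNamespace

from transformers.data.metrics.squad_metrics import squad_evaluate

examples = [
    SimpleNamespace(qas_id="q1", answers=[{"text": "Denver Broncos"}]),
    SimpleNamespace(qas_id="q2", answers=[]),  # unanswerable question
]
preds = {"q1": "the Denver Broncos", "q2": ""}
no_answer_probs = {"q1": 0.1, "q2": 0.9}

results = squad_evaluate(examples, preds, no_answer_probs, no_answer_probability_threshold=0.5)
# All 100.0 for this toy pair: the article "the" is removed by normalization, and the
# high no-answer probability on q2 correctly maps it to the empty answer.
print(results["exact"], results["f1"], results["best_f1"])
```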
+ tokenizer = BasicTokenizer(do_lower_case=do_lower_case) + + tok_text = " ".join(tokenizer.tokenize(orig_text)) + + start_position = tok_text.find(pred_text) + if start_position == -1: + if verbose_logging: + logger.info(f"Unable to find text: '{pred_text}' in '{orig_text}'") + return orig_text + end_position = start_position + len(pred_text) - 1 + + (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text) + (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text) + + if len(orig_ns_text) != len(tok_ns_text): + if verbose_logging: + logger.info(f"Length not equal after stripping spaces: '{orig_ns_text}' vs '{tok_ns_text}'") + return orig_text + + # We then project the characters in `pred_text` back to `orig_text` using + # the character-to-character alignment. + tok_s_to_ns_map = {} + for i, tok_index in tok_ns_to_s_map.items(): + tok_s_to_ns_map[tok_index] = i + + orig_start_position = None + if start_position in tok_s_to_ns_map: + ns_start_position = tok_s_to_ns_map[start_position] + if ns_start_position in orig_ns_to_s_map: + orig_start_position = orig_ns_to_s_map[ns_start_position] + + if orig_start_position is None: + if verbose_logging: + logger.info("Couldn't map start position") + return orig_text + + orig_end_position = None + if end_position in tok_s_to_ns_map: + ns_end_position = tok_s_to_ns_map[end_position] + if ns_end_position in orig_ns_to_s_map: + orig_end_position = orig_ns_to_s_map[ns_end_position] + + if orig_end_position is None: + if verbose_logging: + logger.info("Couldn't map end position") + return orig_text + + output_text = orig_text[orig_start_position : (orig_end_position + 1)] + return output_text + + +def _get_best_indexes(logits, n_best_size): + """Get the n-best logits from a list.""" + index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True) + + best_indexes = [] + for i in range(len(index_and_score)): + if i >= n_best_size: + break + best_indexes.append(index_and_score[i][0]) + return best_indexes + + +def _compute_softmax(scores): + """Compute softmax probability over raw logits.""" + if not scores: + return [] + + max_score = None + for score in scores: + if max_score is None or score > max_score: + max_score = score + + exp_scores = [] + total_sum = 0.0 + for score in scores: + x = math.exp(score - max_score) + exp_scores.append(x) + total_sum += x + + probs = [] + for score in exp_scores: + probs.append(score / total_sum) + return probs + + +def compute_predictions_logits( + all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + do_lower_case, + output_prediction_file, + output_nbest_file, + output_null_log_odds_file, + verbose_logging, + version_2_with_negative, + null_score_diff_threshold, + tokenizer, +): + """Write final predictions to the json file and log-odds of null if needed.""" + if output_prediction_file: + logger.info(f"Writing predictions to: {output_prediction_file}") + if output_nbest_file: + logger.info(f"Writing nbest to: {output_nbest_file}") + if output_null_log_odds_file and version_2_with_negative: + logger.info(f"Writing null_log_odds to: {output_null_log_odds_file}") + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", ["feature_index", "start_index", "end_index", 
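Two of the helpers above exercised on invented inputs: the alignment heuristic described in the `get_final_text` comment block, and the numerically stable softmax over raw logits.

```python
from transformers.data.metrics.squad_metrics import _compute_softmax, get_final_text

# Recovers the original casing span for a normalized prediction, as described above.
print(get_final_text("steve smith", "Steve Smith's", do_lower_case=True))  # 'Steve Smith'

# Softmax with the usual max-subtraction for numerical stability.
print(_compute_softmax([1.0, 2.0, 3.0]))  # [0.0900..., 0.2447..., 0.6652...]
```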
"start_logit", "end_logit"] + ) + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for example_index, example in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + min_null_feature_index = 0 # the paragraph slice with min null score + null_start_logit = 0 # the start logit at the slice with min null score + null_end_logit = 0 # the end logit at the slice with min null score + for feature_index, feature in enumerate(features): + result = unique_id_to_result[feature.unique_id] + start_indexes = _get_best_indexes(result.start_logits, n_best_size) + end_indexes = _get_best_indexes(result.end_logits, n_best_size) + # if we could have irrelevant answers, get the min score of irrelevant + if version_2_with_negative: + feature_null_score = result.start_logits[0] + result.end_logits[0] + if feature_null_score < score_null: + score_null = feature_null_score + min_null_feature_index = feature_index + null_start_logit = result.start_logits[0] + null_end_logit = result.end_logits[0] + for start_index in start_indexes: + for end_index in end_indexes: + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= len(feature.tokens): + continue + if end_index >= len(feature.tokens): + continue + if start_index not in feature.token_to_orig_map: + continue + if end_index not in feature.token_to_orig_map: + continue + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_logit=result.start_logits[start_index], + end_logit=result.end_logits[end_index], + ) + ) + if version_2_with_negative: + prelim_predictions.append( + _PrelimPrediction( + feature_index=min_null_feature_index, + start_index=0, + end_index=0, + start_logit=null_start_logit, + end_logit=null_end_logit, + ) + ) + prelim_predictions = sorted(prelim_predictions, key=lambda x: (x.start_logit + x.end_logit), reverse=True) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_logit", "end_logit"] + ) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + if pred.start_index > 0: # this is a non-null prediction + tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] + + tok_text = tokenizer.convert_tokens_to_string(tok_tokens) + + # tok_text = " ".join(tok_tokens) + # + # # De-tokenize WordPieces that have been split off. 
+ # tok_text = tok_text.replace(" ##", "") + # tok_text = tok_text.replace("##", "") + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + else: + final_text = "" + seen_predictions[final_text] = True + + nbest.append(_NbestPrediction(text=final_text, start_logit=pred.start_logit, end_logit=pred.end_logit)) + # if we didn't include the empty option in the n-best, include it + if version_2_with_negative: + if "" not in seen_predictions: + nbest.append(_NbestPrediction(text="", start_logit=null_start_logit, end_logit=null_end_logit)) + + # In very rare edge cases we could only have single null prediction. + # So we just create a nonce prediction in this case to avoid failure. + if len(nbest) == 1: + nbest.insert(0, _NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append(_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0)) + + if len(nbest) < 1: + raise ValueError("No valid predictions") + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_logit + entry.end_logit) + if not best_non_null_entry: + if entry.text: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for i, entry in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_logit"] = entry.start_logit + output["end_logit"] = entry.end_logit + nbest_json.append(output) + + if len(nbest_json) < 1: + raise ValueError("No valid predictions") + + if not version_2_with_negative: + all_predictions[example.qas_id] = nbest_json[0]["text"] + else: + # predict "" iff the null score - the score of best non-null > threshold + score_diff = score_null - best_non_null_entry.start_logit - (best_non_null_entry.end_logit) + scores_diff_json[example.qas_id] = score_diff + if score_diff > null_score_diff_threshold: + all_predictions[example.qas_id] = "" + else: + all_predictions[example.qas_id] = best_non_null_entry.text + all_nbest_json[example.qas_id] = nbest_json + + if output_prediction_file: + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + if output_nbest_file: + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if output_null_log_odds_file and version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions + + +def compute_predictions_log_probs( + all_examples, + all_features, + all_results, + n_best_size, + max_answer_length, + output_prediction_file, + output_nbest_file, + output_null_log_odds_file, + start_n_top, + end_n_top, + version_2_with_negative, + tokenizer, + verbose_logging, +): + """ + XLNet write prediction logic (more complex than Bert's). Write final predictions to the json file and log-odds of + null if needed. 
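A toy restatement of the SQuAD v2 null-answer rule applied at the end of `compute_predictions_logits` above; all numbers are invented.

```python
# Predict "" iff (null score - best non-null span score) exceeds the threshold.
score_null = 2.5                  # start_logits[0] + end_logits[0] of the best null slice
best_span_start_logit = 1.8
best_span_end_logit = 2.3
null_score_diff_threshold = 0.0

score_diff = score_null - best_span_start_logit - best_span_end_logit
prediction = "" if score_diff > null_score_diff_threshold else "best non-null span text"
print(score_diff, repr(prediction))  # -1.6 'best non-null span text'
```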
+ + Requires utils_squad_evaluate.py + """ + _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name + "PrelimPrediction", ["feature_index", "start_index", "end_index", "start_log_prob", "end_log_prob"] + ) + + _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name + "NbestPrediction", ["text", "start_log_prob", "end_log_prob"] + ) + + logger.info(f"Writing predictions to: {output_prediction_file}") + + example_index_to_features = collections.defaultdict(list) + for feature in all_features: + example_index_to_features[feature.example_index].append(feature) + + unique_id_to_result = {} + for result in all_results: + unique_id_to_result[result.unique_id] = result + + all_predictions = collections.OrderedDict() + all_nbest_json = collections.OrderedDict() + scores_diff_json = collections.OrderedDict() + + for example_index, example in enumerate(all_examples): + features = example_index_to_features[example_index] + + prelim_predictions = [] + # keep track of the minimum score of null start+end of position 0 + score_null = 1000000 # large and positive + + for feature_index, feature in enumerate(features): + result = unique_id_to_result[feature.unique_id] + + cur_null_score = result.cls_logits + + # if we could have irrelevant answers, get the min score of irrelevant + score_null = min(score_null, cur_null_score) + + for i in range(start_n_top): + for j in range(end_n_top): + start_log_prob = result.start_logits[i] + start_index = result.start_top_index[i] + + j_index = i * end_n_top + j + + end_log_prob = result.end_logits[j_index] + end_index = result.end_top_index[j_index] + + # We could hypothetically create invalid predictions, e.g., predict + # that the start of the span is in the question. We throw out all + # invalid predictions. + if start_index >= feature.paragraph_len - 1: + continue + if end_index >= feature.paragraph_len - 1: + continue + + if not feature.token_is_max_context.get(start_index, False): + continue + if end_index < start_index: + continue + length = end_index - start_index + 1 + if length > max_answer_length: + continue + + prelim_predictions.append( + _PrelimPrediction( + feature_index=feature_index, + start_index=start_index, + end_index=end_index, + start_log_prob=start_log_prob, + end_log_prob=end_log_prob, + ) + ) + + prelim_predictions = sorted( + prelim_predictions, key=lambda x: (x.start_log_prob + x.end_log_prob), reverse=True + ) + + seen_predictions = {} + nbest = [] + for pred in prelim_predictions: + if len(nbest) >= n_best_size: + break + feature = features[pred.feature_index] + + # XLNet un-tokenizer + # Let's keep it simple for now and see if we need all this later. 
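A toy illustration of how the loop above pairs the top start and end candidates: XLNet's end scores arrive flattened with `end_n_top` entries per start candidate, hence the `j_index` arithmetic. The scores below are invented.

```python
start_n_top, end_n_top = 2, 2
start_log_probs = [-0.1, -1.3]            # top start_n_top start scores
end_log_probs = [-0.2, -2.0, -0.5, -1.1]  # flattened [start_n_top * end_n_top] end scores

for i in range(start_n_top):
    for j in range(end_n_top):
        j_index = i * end_n_top + j       # row-major index into the flattened end scores
        print(i, j, start_log_probs[i] + end_log_probs[j_index])
```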
+ # + # tok_start_to_orig_index = feature.tok_start_to_orig_index + # tok_end_to_orig_index = feature.tok_end_to_orig_index + # start_orig_pos = tok_start_to_orig_index[pred.start_index] + # end_orig_pos = tok_end_to_orig_index[pred.end_index] + # paragraph_text = example.paragraph_text + # final_text = paragraph_text[start_orig_pos: end_orig_pos + 1].strip() + + # Previously used Bert untokenizer + tok_tokens = feature.tokens[pred.start_index : (pred.end_index + 1)] + orig_doc_start = feature.token_to_orig_map[pred.start_index] + orig_doc_end = feature.token_to_orig_map[pred.end_index] + orig_tokens = example.doc_tokens[orig_doc_start : (orig_doc_end + 1)] + tok_text = tokenizer.convert_tokens_to_string(tok_tokens) + + # Clean whitespace + tok_text = tok_text.strip() + tok_text = " ".join(tok_text.split()) + orig_text = " ".join(orig_tokens) + + if hasattr(tokenizer, "do_lower_case"): + do_lower_case = tokenizer.do_lower_case + else: + do_lower_case = tokenizer.do_lowercase_and_remove_accent + + final_text = get_final_text(tok_text, orig_text, do_lower_case, verbose_logging) + + if final_text in seen_predictions: + continue + + seen_predictions[final_text] = True + + nbest.append( + _NbestPrediction(text=final_text, start_log_prob=pred.start_log_prob, end_log_prob=pred.end_log_prob) + ) + + # In very rare edge cases we could have no valid predictions. So we + # just create a nonce prediction in this case to avoid failure. + if not nbest: + nbest.append(_NbestPrediction(text="", start_log_prob=-1e6, end_log_prob=-1e6)) + + total_scores = [] + best_non_null_entry = None + for entry in nbest: + total_scores.append(entry.start_log_prob + entry.end_log_prob) + if not best_non_null_entry: + best_non_null_entry = entry + + probs = _compute_softmax(total_scores) + + nbest_json = [] + for i, entry in enumerate(nbest): + output = collections.OrderedDict() + output["text"] = entry.text + output["probability"] = probs[i] + output["start_log_prob"] = entry.start_log_prob + output["end_log_prob"] = entry.end_log_prob + nbest_json.append(output) + + if len(nbest_json) < 1: + raise ValueError("No valid predictions") + if best_non_null_entry is None: + raise ValueError("No valid predictions") + + score_diff = score_null + scores_diff_json[example.qas_id] = score_diff + # note(zhiliny): always predict best_non_null_entry + # and the evaluation script will search for the best threshold + all_predictions[example.qas_id] = best_non_null_entry.text + + all_nbest_json[example.qas_id] = nbest_json + + with open(output_prediction_file, "w") as writer: + writer.write(json.dumps(all_predictions, indent=4) + "\n") + + with open(output_nbest_file, "w") as writer: + writer.write(json.dumps(all_nbest_json, indent=4) + "\n") + + if version_2_with_negative: + with open(output_null_log_odds_file, "w") as writer: + writer.write(json.dumps(scores_diff_json, indent=4) + "\n") + + return all_predictions diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a26ab5776d74715428b10c4d9cd943e53b253785 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels +from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features +from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor +from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eebae187d331a076310e20d14b22c1749fe63d1d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff34b26f7c51ef9c1f4d7fd4cea7f93003def799 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/glue.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb1461eccefeacc3a3f2f9b2ab4600e71bd27b19 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/squad.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b6a99478432b1b522319bbd1ac28aa45decf3d4 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d669802f6c6b67d1c1b78eb2277113428f8de890 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/__pycache__/xnli.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/glue.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/glue.py new file mode 100644 index 0000000000000000000000000000000000000000..3d22968c9d06323c7c1cd4b00e5fcd2e6cf3f35d --- /dev/null +++ 
b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/glue.py @@ -0,0 +1,643 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GLUE processors and helpers""" + +import os +import warnings +from dataclasses import asdict +from enum import Enum +from typing import List, Optional, Union + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import is_tf_available, logging +from .utils import DataProcessor, InputExample, InputFeatures + + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + +DEPRECATION_WARNING = ( + "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets " + "library. You can have a look at this example script for pointers: " + "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" +) + + +def glue_convert_examples_to_features( + examples: Union[List[InputExample], "tf.data.Dataset"], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + """ + Loads a data file into a list of `InputFeatures` + + Args: + examples: List of `InputExamples` or `tf.data.Dataset` containing the examples. + tokenizer: Instance of a tokenizer that will tokenize the examples + max_length: Maximum example length. Defaults to the tokenizer's max_len + task: GLUE task + label_list: List of labels. Can be obtained from the processor using the `processor.get_labels()` method + output_mode: String indicating the output mode. Either `regression` or `classification` + + Returns: + If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the task-specific + features. If the input is a list of `InputExamples`, will return a list of task-specific `InputFeatures` which + can be fed to the model. + + """ + warnings.warn(DEPRECATION_WARNING.format("function"), FutureWarning) + if is_tf_available() and isinstance(examples, tf.data.Dataset): + if task is None: + raise ValueError("When calling glue_convert_examples_to_features from TF, the task parameter is required.") + return _tf_glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + return _glue_convert_examples_to_features( + examples, tokenizer, max_length=max_length, task=task, label_list=label_list, output_mode=output_mode + ) + + +if is_tf_available(): + + def _tf_glue_convert_examples_to_features( + examples: tf.data.Dataset, + tokenizer: PreTrainedTokenizer, + task=str, + max_length: Optional[int] = None, + ) -> tf.data.Dataset: + """ + Returns: + A `tf.data.Dataset` containing the task-specific features. 
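A minimal usage sketch for `glue_convert_examples_to_features`. The checkpoint name is only illustrative and assumes a cached or downloadable tokenizer; the sentences and labels are toy data.

```python
from transformers import AutoTokenizer
from transformers.data.processors.glue import glue_convert_examples_to_features
from transformers.data.processors.utils import InputExample

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # any PreTrainedTokenizer works

examples = [
    InputExample(guid="train-1", text_a="He ate the cake.", text_b="The cake was eaten.", label="1"),
    InputExample(guid="train-2", text_a="It is raining.", text_b="The sun is out.", label="0"),
]

# Passing task="mrpc" lets the helper pick the label list and output mode itself.
features = glue_convert_examples_to_features(examples, tokenizer, max_length=64, task="mrpc")
print(len(features), features[0].label, features[0].input_ids[:8])
```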
+ + """ + processor = glue_processors[task]() + examples = [processor.tfds_map(processor.get_example_from_tensor_dict(example)) for example in examples] + features = glue_convert_examples_to_features(examples, tokenizer, max_length=max_length, task=task) + label_type = tf.float32 if task == "sts-b" else tf.int64 + + def gen(): + for ex in features: + d = {k: v for k, v in asdict(ex).items() if v is not None} + label = d.pop("label") + yield (d, label) + + input_names = tokenizer.model_input_names + + return tf.data.Dataset.from_generator( + gen, + ({k: tf.int32 for k in input_names}, label_type), + ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])), + ) + + +def _glue_convert_examples_to_features( + examples: List[InputExample], + tokenizer: PreTrainedTokenizer, + max_length: Optional[int] = None, + task=None, + label_list=None, + output_mode=None, +): + if max_length is None: + max_length = tokenizer.model_max_length + + if task is not None: + processor = glue_processors[task]() + if label_list is None: + label_list = processor.get_labels() + logger.info(f"Using label list {label_list} for task {task}") + if output_mode is None: + output_mode = glue_output_modes[task] + logger.info(f"Using output mode {output_mode} for task {task}") + + label_map = {label: i for i, label in enumerate(label_list)} + + def label_from_example(example: InputExample) -> Union[int, float, None]: + if example.label is None: + return None + if output_mode == "classification": + return label_map[example.label] + elif output_mode == "regression": + return float(example.label) + raise KeyError(output_mode) + + labels = [label_from_example(example) for example in examples] + + batch_encoding = tokenizer( + [(example.text_a, example.text_b) for example in examples], + max_length=max_length, + padding="max_length", + truncation=True, + ) + + features = [] + for i in range(len(examples)): + inputs = {k: batch_encoding[k][i] for k in batch_encoding} + + feature = InputFeatures(**inputs, label=labels[i]) + features.append(feature) + + for i, example in enumerate(examples[:5]): + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"features: {features[i]}") + + return features + + +class OutputMode(Enum): + classification = "classification" + regression = "regression" + + +class MrpcProcessor(DataProcessor): + """Processor for the MRPC data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + logger.info(f"LOOKING AT {os.path.join(data_dir, 'train.tsv')}") + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, 
dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[3] + text_b = line[4] + label = None if set_type == "test" else line[0] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliProcessor(DataProcessor): + """Processor for the MultiNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["premise"].numpy().decode("utf-8"), + tensor_dict["hypothesis"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")), "dev_matched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test_matched") + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[8] + text_b = line[9] + label = None if set_type.startswith("test") else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class MnliMismatchedProcessor(MnliProcessor): + """Processor for the MultiNLI Mismatched data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")), "dev_mismatched") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test_mismatched") + + +class ColaProcessor(DataProcessor): + """Processor for the CoLA data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def 
_create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + if test_mode: + lines = lines[1:] + text_index = 1 if test_mode else 3 + examples = [] + for i, line in enumerate(lines): + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if test_mode else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class Sst2Processor(DataProcessor): + """Processor for the SST-2 data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence"].numpy().decode("utf-8"), + None, + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + text_index = 1 if set_type == "test" else 0 + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{i}" + text_a = line[text_index] + label = None if set_type == "test" else line[1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=None, label=label)) + return examples + + +class StsbProcessor(DataProcessor): + """Processor for the STS-B data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return [None] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[7] + text_b = line[8] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QqpProcessor(DataProcessor): + """Processor for the QQP data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + 
super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question1"].numpy().decode("utf-8"), + tensor_dict["question2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + test_mode = set_type == "test" + q1_index = 1 if test_mode else 3 + q2_index = 2 if test_mode else 4 + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + try: + text_a = line[q1_index] + text_b = line[q2_index] + label = None if test_mode else line[5] + except IndexError: + continue + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class QnliProcessor(DataProcessor): + """Processor for the QNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["question"].numpy().decode("utf-8"), + tensor_dict["sentence"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class RteProcessor(DataProcessor): + """Processor for the RTE data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): 
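A usage sketch for the processor classes above, using SST-2 as an example; the data directory is hypothetical and must contain the GLUE task TSV files.

```python
from transformers.data.processors.glue import Sst2Processor

processor = Sst2Processor()    # emits a FutureWarning; these processors are deprecated
print(processor.get_labels())  # ['0', '1']

# Reading the TSVs needs a local copy of the GLUE SST-2 data; the path is hypothetical.
train_examples = processor.get_train_examples("/path/to/glue/SST-2")
print(train_examples[0].guid, train_examples[0].text_a, train_examples[0].label)
```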
+ """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["entailment", "not_entailment"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +class WnliProcessor(DataProcessor): + """Processor for the WNLI data set (GLUE version).""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn(DEPRECATION_WARNING.format("processor"), FutureWarning) + + def get_example_from_tensor_dict(self, tensor_dict): + """See base class.""" + return InputExample( + tensor_dict["idx"].numpy(), + tensor_dict["sentence1"].numpy().decode("utf-8"), + tensor_dict["sentence2"].numpy().decode("utf-8"), + str(tensor_dict["label"].numpy()), + ) + + def get_train_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "train.tsv")), "train") + + def get_dev_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev") + + def get_test_examples(self, data_dir): + """See base class.""" + return self._create_examples(self._read_tsv(os.path.join(data_dir, "test.tsv")), "test") + + def get_labels(self): + """See base class.""" + return ["0", "1"] + + def _create_examples(self, lines, set_type): + """Creates examples for the training, dev and test sets.""" + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"{set_type}-{line[0]}" + text_a = line[1] + text_b = line[2] + label = None if set_type == "test" else line[-1] + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + +glue_tasks_num_labels = { + "cola": 2, + "mnli": 3, + "mrpc": 2, + "sst-2": 2, + "sts-b": 1, + "qqp": 2, + "qnli": 2, + "rte": 2, + "wnli": 2, +} + +glue_processors = { + "cola": ColaProcessor, + "mnli": MnliProcessor, + "mnli-mm": MnliMismatchedProcessor, + "mrpc": MrpcProcessor, + "sst-2": Sst2Processor, + "sts-b": StsbProcessor, + "qqp": QqpProcessor, + "qnli": QnliProcessor, + "rte": RteProcessor, + "wnli": WnliProcessor, +} + +glue_output_modes = { + "cola": "classification", + "mnli": "classification", + "mnli-mm": "classification", + "mrpc": "classification", + "sst-2": "classification", + "sts-b": "regression", + "qqp": "classification", + "qnli": "classification", + "rte": "classification", + "wnli": "classification", +} diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/squad.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/squad.py new file mode 100644 index 0000000000000000000000000000000000000000..0f8bd2480551158c9916215e43436c8e027dbed0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/squad.py @@ -0,0 
+1,845 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import json +import os +from functools import partial +from multiprocessing import Pool, cpu_count + +import numpy as np +from tqdm import tqdm + +from ...models.bert.tokenization_bert import whitespace_tokenize +from ...tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase, TruncationStrategy +from ...utils import is_tf_available, is_torch_available, logging +from .utils import DataProcessor + + +# Store the tokenizers which insert 2 separators tokens +MULTI_SEP_TOKENS_TOKENIZERS_SET = {"roberta", "camembert", "bart", "mpnet"} + + +if is_torch_available(): + import torch + from torch.utils.data import TensorDataset + +if is_tf_available(): + import tensorflow as tf + +logger = logging.get_logger(__name__) + + +def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text): + """Returns tokenized answer spans that better match the annotated answer.""" + tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text)) + + for new_start in range(input_start, input_end + 1): + for new_end in range(input_end, new_start - 1, -1): + text_span = " ".join(doc_tokens[new_start : (new_end + 1)]) + if text_span == tok_answer_text: + return (new_start, new_end) + + return (input_start, input_end) + + +def _check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + best_score = None + best_span_index = None + for span_index, doc_span in enumerate(doc_spans): + end = doc_span.start + doc_span.length - 1 + if position < doc_span.start: + continue + if position > end: + continue + num_left_context = position - doc_span.start + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span.length + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def _new_check_is_max_context(doc_spans, cur_span_index, position): + """Check if this is the 'max context' doc span for the token.""" + # if len(doc_spans) == 1: + # return True + best_score = None + best_span_index = None + for span_index, doc_span in enumerate(doc_spans): + end = doc_span["start"] + doc_span["length"] - 1 + if position < doc_span["start"]: + continue + if position > end: + continue + num_left_context = position - doc_span["start"] + num_right_context = end - position + score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"] + if best_score is None or score > best_score: + best_score = score + best_span_index = span_index + + return cur_span_index == best_span_index + + +def _is_whitespace(c): + if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F: + return True + return False + + +def squad_convert_example_to_features( + example, max_seq_length, doc_stride, max_query_length, padding_strategy, is_training +): + features = [] + if is_training and 
not example.is_impossible: + # Get start and end position + start_position = example.start_position + end_position = example.end_position + + # If the answer cannot be found in the text, then skip this example. + actual_text = " ".join(example.doc_tokens[start_position : (end_position + 1)]) + cleaned_answer_text = " ".join(whitespace_tokenize(example.answer_text)) + if actual_text.find(cleaned_answer_text) == -1: + logger.warning(f"Could not find answer: '{actual_text}' vs. '{cleaned_answer_text}'") + return [] + + tok_to_orig_index = [] + orig_to_tok_index = [] + all_doc_tokens = [] + for i, token in enumerate(example.doc_tokens): + orig_to_tok_index.append(len(all_doc_tokens)) + if tokenizer.__class__.__name__ in [ + "RobertaTokenizer", + "LongformerTokenizer", + "BartTokenizer", + "RobertaTokenizerFast", + "LongformerTokenizerFast", + "BartTokenizerFast", + ]: + sub_tokens = tokenizer.tokenize(token, add_prefix_space=True) + else: + sub_tokens = tokenizer.tokenize(token) + for sub_token in sub_tokens: + tok_to_orig_index.append(i) + all_doc_tokens.append(sub_token) + + if is_training and not example.is_impossible: + tok_start_position = orig_to_tok_index[example.start_position] + if example.end_position < len(example.doc_tokens) - 1: + tok_end_position = orig_to_tok_index[example.end_position + 1] - 1 + else: + tok_end_position = len(all_doc_tokens) - 1 + + (tok_start_position, tok_end_position) = _improve_answer_span( + all_doc_tokens, tok_start_position, tok_end_position, tokenizer, example.answer_text + ) + + spans = [] + + truncated_query = tokenizer.encode( + example.question_text, add_special_tokens=False, truncation=True, max_length=max_query_length + ) + + # Tokenizers who insert 2 SEP tokens in-between & need to have special handling + # in the way they compute mask of added tokens. 
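+    # For example, a BERT-style tokenizer adds 2 special tokens around a single sequence
+    # ([CLS] ... [SEP]), so sequence_added_tokens is 2; tokenizers listed in
+    # MULTI_SEP_TOKENS_TOKENIZERS_SET (e.g. RoBERTa) insert an extra separator between the
+    # two segments of a pair, hence the + 1 below.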
+ tokenizer_type = type(tokenizer).__name__.replace("Tokenizer", "").lower() + sequence_added_tokens = ( + tokenizer.model_max_length - tokenizer.max_len_single_sentence + 1 + if tokenizer_type in MULTI_SEP_TOKENS_TOKENIZERS_SET + else tokenizer.model_max_length - tokenizer.max_len_single_sentence + ) + sequence_pair_added_tokens = tokenizer.model_max_length - tokenizer.max_len_sentences_pair + + span_doc_tokens = all_doc_tokens + while len(spans) * doc_stride < len(all_doc_tokens): + # Define the side we want to truncate / pad and the text/pair sorting + if tokenizer.padding_side == "right": + texts = truncated_query + pairs = span_doc_tokens + truncation = TruncationStrategy.ONLY_SECOND.value + else: + texts = span_doc_tokens + pairs = truncated_query + truncation = TruncationStrategy.ONLY_FIRST.value + + encoded_dict = tokenizer.encode_plus( # TODO(thom) update this logic + texts, + pairs, + truncation=truncation, + padding=padding_strategy, + max_length=max_seq_length, + return_overflowing_tokens=True, + stride=max_seq_length - doc_stride - len(truncated_query) - sequence_pair_added_tokens, + return_token_type_ids=True, + ) + + paragraph_len = min( + len(all_doc_tokens) - len(spans) * doc_stride, + max_seq_length - len(truncated_query) - sequence_pair_added_tokens, + ) + + if tokenizer.pad_token_id in encoded_dict["input_ids"]: + if tokenizer.padding_side == "right": + non_padded_ids = encoded_dict["input_ids"][: encoded_dict["input_ids"].index(tokenizer.pad_token_id)] + else: + last_padding_id_position = ( + len(encoded_dict["input_ids"]) - 1 - encoded_dict["input_ids"][::-1].index(tokenizer.pad_token_id) + ) + non_padded_ids = encoded_dict["input_ids"][last_padding_id_position + 1 :] + + else: + non_padded_ids = encoded_dict["input_ids"] + + tokens = tokenizer.convert_ids_to_tokens(non_padded_ids) + + token_to_orig_map = {} + for i in range(paragraph_len): + index = len(truncated_query) + sequence_added_tokens + i if tokenizer.padding_side == "right" else i + token_to_orig_map[index] = tok_to_orig_index[len(spans) * doc_stride + i] + + encoded_dict["paragraph_len"] = paragraph_len + encoded_dict["tokens"] = tokens + encoded_dict["token_to_orig_map"] = token_to_orig_map + encoded_dict["truncated_query_with_special_tokens_length"] = len(truncated_query) + sequence_added_tokens + encoded_dict["token_is_max_context"] = {} + encoded_dict["start"] = len(spans) * doc_stride + encoded_dict["length"] = paragraph_len + + spans.append(encoded_dict) + + if "overflowing_tokens" not in encoded_dict or ( + "overflowing_tokens" in encoded_dict and len(encoded_dict["overflowing_tokens"]) == 0 + ): + break + span_doc_tokens = encoded_dict["overflowing_tokens"] + + for doc_span_index in range(len(spans)): + for j in range(spans[doc_span_index]["paragraph_len"]): + is_max_context = _new_check_is_max_context(spans, doc_span_index, doc_span_index * doc_stride + j) + index = ( + j + if tokenizer.padding_side == "left" + else spans[doc_span_index]["truncated_query_with_special_tokens_length"] + j + ) + spans[doc_span_index]["token_is_max_context"][index] = is_max_context + + for span in spans: + # Identify the position of the CLS token + cls_index = span["input_ids"].index(tokenizer.cls_token_id) + + # p_mask: mask with 1 for token than cannot be in the answer (0 for token which can be in an answer) + # Original TF implementation also keep the classification token (set to 0) + p_mask = np.ones_like(span["token_type_ids"]) + if tokenizer.padding_side == "right": + p_mask[len(truncated_query) + 
sequence_added_tokens :] = 0 + else: + p_mask[-len(span["tokens"]) : -(len(truncated_query) + sequence_added_tokens)] = 0 + + pad_token_indices = np.where(span["input_ids"] == tokenizer.pad_token_id) + special_token_indices = np.asarray( + tokenizer.get_special_tokens_mask(span["input_ids"], already_has_special_tokens=True) + ).nonzero() + + p_mask[pad_token_indices] = 1 + p_mask[special_token_indices] = 1 + + # Set the cls index to 0: the CLS index can be used for impossible answers + p_mask[cls_index] = 0 + + span_is_impossible = example.is_impossible + start_position = 0 + end_position = 0 + if is_training and not span_is_impossible: + # For training, if our document chunk does not contain an annotation + # we throw it out, since there is nothing to predict. + doc_start = span["start"] + doc_end = span["start"] + span["length"] - 1 + out_of_span = False + + if not (tok_start_position >= doc_start and tok_end_position <= doc_end): + out_of_span = True + + if out_of_span: + start_position = cls_index + end_position = cls_index + span_is_impossible = True + else: + if tokenizer.padding_side == "left": + doc_offset = 0 + else: + doc_offset = len(truncated_query) + sequence_added_tokens + + start_position = tok_start_position - doc_start + doc_offset + end_position = tok_end_position - doc_start + doc_offset + + features.append( + SquadFeatures( + span["input_ids"], + span["attention_mask"], + span["token_type_ids"], + cls_index, + p_mask.tolist(), + example_index=0, # Can not set unique_id and example_index here. They will be set after multiple processing. + unique_id=0, + paragraph_len=span["paragraph_len"], + token_is_max_context=span["token_is_max_context"], + tokens=span["tokens"], + token_to_orig_map=span["token_to_orig_map"], + start_position=start_position, + end_position=end_position, + is_impossible=span_is_impossible, + qas_id=example.qas_id, + ) + ) + return features + + +def squad_convert_example_to_features_init(tokenizer_for_convert: PreTrainedTokenizerBase): + global tokenizer + tokenizer = tokenizer_for_convert + + +def squad_convert_examples_to_features( + examples, + tokenizer, + max_seq_length, + doc_stride, + max_query_length, + is_training, + padding_strategy="max_length", + return_dataset=False, + threads=1, + tqdm_enabled=True, +): + """ + Converts a list of examples into a list of features that can be directly given as input to a model. It is + model-dependant and takes advantage of many of the tokenizer's features to create the model's inputs. + + Args: + examples: list of [`~data.processors.squad.SquadExample`] + tokenizer: an instance of a child of [`PreTrainedTokenizer`] + max_seq_length: The maximum sequence length of the inputs. + doc_stride: The stride used when the context is too large and is split across several features. + max_query_length: The maximum length of the query. + is_training: whether to create features for model evaluation or model training. + padding_strategy: Default to "max_length". Which padding strategy to use + return_dataset: Default False. Either 'pt' or 'tf'. + if 'pt': returns a torch.data.TensorDataset, if 'tf': returns a tf.data.Dataset + threads: multiple processing threads. 
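+        tqdm_enabled: Default True. Whether to display a tqdm progress bar while converting the examples.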
+ + + Returns: + list of [`~data.processors.squad.SquadFeatures`] + + Example: + + ```python + processor = SquadV2Processor() + examples = processor.get_dev_examples(data_dir) + + features = squad_convert_examples_to_features( + examples=examples, + tokenizer=tokenizer, + max_seq_length=args.max_seq_length, + doc_stride=args.doc_stride, + max_query_length=args.max_query_length, + is_training=not evaluate, + ) + ```""" + # Defining helper methods + features = [] + + threads = min(threads, cpu_count()) + with Pool(threads, initializer=squad_convert_example_to_features_init, initargs=(tokenizer,)) as p: + annotate_ = partial( + squad_convert_example_to_features, + max_seq_length=max_seq_length, + doc_stride=doc_stride, + max_query_length=max_query_length, + padding_strategy=padding_strategy, + is_training=is_training, + ) + features = list( + tqdm( + p.imap(annotate_, examples, chunksize=32), + total=len(examples), + desc="convert squad examples to features", + disable=not tqdm_enabled, + ) + ) + + new_features = [] + unique_id = 1000000000 + example_index = 0 + for example_features in tqdm( + features, total=len(features), desc="add example index and unique id", disable=not tqdm_enabled + ): + if not example_features: + continue + for example_feature in example_features: + example_feature.example_index = example_index + example_feature.unique_id = unique_id + new_features.append(example_feature) + unique_id += 1 + example_index += 1 + features = new_features + del new_features + if return_dataset == "pt": + if not is_torch_available(): + raise RuntimeError("PyTorch must be installed to return a PyTorch dataset.") + + # Convert to Tensors and build dataset + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_masks = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) + all_cls_index = torch.tensor([f.cls_index for f in features], dtype=torch.long) + all_p_mask = torch.tensor([f.p_mask for f in features], dtype=torch.float) + all_is_impossible = torch.tensor([f.is_impossible for f in features], dtype=torch.float) + + if not is_training: + all_feature_index = torch.arange(all_input_ids.size(0), dtype=torch.long) + dataset = TensorDataset( + all_input_ids, all_attention_masks, all_token_type_ids, all_feature_index, all_cls_index, all_p_mask + ) + else: + all_start_positions = torch.tensor([f.start_position for f in features], dtype=torch.long) + all_end_positions = torch.tensor([f.end_position for f in features], dtype=torch.long) + dataset = TensorDataset( + all_input_ids, + all_attention_masks, + all_token_type_ids, + all_start_positions, + all_end_positions, + all_cls_index, + all_p_mask, + all_is_impossible, + ) + + return features, dataset + elif return_dataset == "tf": + if not is_tf_available(): + raise RuntimeError("TensorFlow must be installed to return a TensorFlow dataset.") + + def gen(): + for i, ex in enumerate(features): + if ex.token_type_ids is None: + yield ( + { + "input_ids": ex.input_ids, + "attention_mask": ex.attention_mask, + "feature_index": i, + "qas_id": ex.qas_id, + }, + { + "start_positions": ex.start_position, + "end_positions": ex.end_position, + "cls_index": ex.cls_index, + "p_mask": ex.p_mask, + "is_impossible": ex.is_impossible, + }, + ) + else: + yield ( + { + "input_ids": ex.input_ids, + "attention_mask": ex.attention_mask, + "token_type_ids": ex.token_type_ids, + "feature_index": i, + "qas_id": ex.qas_id, + 
}, + { + "start_positions": ex.start_position, + "end_positions": ex.end_position, + "cls_index": ex.cls_index, + "p_mask": ex.p_mask, + "is_impossible": ex.is_impossible, + }, + ) + + # Why have we split the batch into a tuple? PyTorch just has a list of tensors. + if "token_type_ids" in tokenizer.model_input_names: + train_types = ( + { + "input_ids": tf.int32, + "attention_mask": tf.int32, + "token_type_ids": tf.int32, + "feature_index": tf.int64, + "qas_id": tf.string, + }, + { + "start_positions": tf.int64, + "end_positions": tf.int64, + "cls_index": tf.int64, + "p_mask": tf.int32, + "is_impossible": tf.int32, + }, + ) + + train_shapes = ( + { + "input_ids": tf.TensorShape([None]), + "attention_mask": tf.TensorShape([None]), + "token_type_ids": tf.TensorShape([None]), + "feature_index": tf.TensorShape([]), + "qas_id": tf.TensorShape([]), + }, + { + "start_positions": tf.TensorShape([]), + "end_positions": tf.TensorShape([]), + "cls_index": tf.TensorShape([]), + "p_mask": tf.TensorShape([None]), + "is_impossible": tf.TensorShape([]), + }, + ) + else: + train_types = ( + {"input_ids": tf.int32, "attention_mask": tf.int32, "feature_index": tf.int64, "qas_id": tf.string}, + { + "start_positions": tf.int64, + "end_positions": tf.int64, + "cls_index": tf.int64, + "p_mask": tf.int32, + "is_impossible": tf.int32, + }, + ) + + train_shapes = ( + { + "input_ids": tf.TensorShape([None]), + "attention_mask": tf.TensorShape([None]), + "feature_index": tf.TensorShape([]), + "qas_id": tf.TensorShape([]), + }, + { + "start_positions": tf.TensorShape([]), + "end_positions": tf.TensorShape([]), + "cls_index": tf.TensorShape([]), + "p_mask": tf.TensorShape([None]), + "is_impossible": tf.TensorShape([]), + }, + ) + + return tf.data.Dataset.from_generator(gen, train_types, train_shapes) + else: + return features + + +class SquadProcessor(DataProcessor): + """ + Processor for the SQuAD data set. overridden by SquadV1Processor and SquadV2Processor, used by the version 1.1 and + version 2.0 of SQuAD, respectively. + """ + + train_file = None + dev_file = None + + def _get_example_from_tensor_dict(self, tensor_dict, evaluate=False): + if not evaluate: + answer = tensor_dict["answers"]["text"][0].numpy().decode("utf-8") + answer_start = tensor_dict["answers"]["answer_start"][0].numpy() + answers = [] + else: + answers = [ + {"answer_start": start.numpy(), "text": text.numpy().decode("utf-8")} + for start, text in zip(tensor_dict["answers"]["answer_start"], tensor_dict["answers"]["text"]) + ] + + answer = None + answer_start = None + + return SquadExample( + qas_id=tensor_dict["id"].numpy().decode("utf-8"), + question_text=tensor_dict["question"].numpy().decode("utf-8"), + context_text=tensor_dict["context"].numpy().decode("utf-8"), + answer_text=answer, + start_position_character=answer_start, + title=tensor_dict["title"].numpy().decode("utf-8"), + answers=answers, + ) + + def get_examples_from_dataset(self, dataset, evaluate=False): + """ + Creates a list of [`~data.processors.squad.SquadExample`] using a TFDS dataset. 
+ + Args: + dataset: The tfds dataset loaded from *tensorflow_datasets.load("squad")* + evaluate: Boolean specifying if in evaluation mode or in training mode + + Returns: + List of SquadExample + + Examples: + + ```python + >>> import tensorflow_datasets as tfds + + >>> dataset = tfds.load("squad") + + >>> training_examples = get_examples_from_dataset(dataset, evaluate=False) + >>> evaluation_examples = get_examples_from_dataset(dataset, evaluate=True) + ```""" + + if evaluate: + dataset = dataset["validation"] + else: + dataset = dataset["train"] + + examples = [] + for tensor_dict in tqdm(dataset): + examples.append(self._get_example_from_tensor_dict(tensor_dict, evaluate=evaluate)) + + return examples + + def get_train_examples(self, data_dir, filename=None): + """ + Returns the training examples from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the training file has a different name than the original one + which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively. + + """ + if data_dir is None: + data_dir = "" + + if self.train_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open( + os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8" + ) as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "train") + + def get_dev_examples(self, data_dir, filename=None): + """ + Returns the evaluation example from the data directory. + + Args: + data_dir: Directory containing the data files used for training and evaluating. + filename: None by default, specify this if the evaluation file has a different name than the original one + which is `dev-v1.1.json` and `dev-v2.0.json` for squad versions 1.1 and 2.0 respectively. 
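+
+        Example (a minimal sketch, assuming `data_dir` contains the default `dev-v2.0.json` file):
+
+        ```python
+        processor = SquadV2Processor()
+        examples = processor.get_dev_examples(data_dir)
+        ```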
+ """ + if data_dir is None: + data_dir = "" + + if self.dev_file is None: + raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor") + + with open( + os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8" + ) as reader: + input_data = json.load(reader)["data"] + return self._create_examples(input_data, "dev") + + def _create_examples(self, input_data, set_type): + is_training = set_type == "train" + examples = [] + for entry in tqdm(input_data): + title = entry["title"] + for paragraph in entry["paragraphs"]: + context_text = paragraph["context"] + for qa in paragraph["qas"]: + qas_id = qa["id"] + question_text = qa["question"] + start_position_character = None + answer_text = None + answers = [] + + is_impossible = qa.get("is_impossible", False) + if not is_impossible: + if is_training: + answer = qa["answers"][0] + answer_text = answer["text"] + start_position_character = answer["answer_start"] + else: + answers = qa["answers"] + + example = SquadExample( + qas_id=qas_id, + question_text=question_text, + context_text=context_text, + answer_text=answer_text, + start_position_character=start_position_character, + title=title, + is_impossible=is_impossible, + answers=answers, + ) + examples.append(example) + return examples + + +class SquadV1Processor(SquadProcessor): + train_file = "train-v1.1.json" + dev_file = "dev-v1.1.json" + + +class SquadV2Processor(SquadProcessor): + train_file = "train-v2.0.json" + dev_file = "dev-v2.0.json" + + +class SquadExample: + """ + A single training/test example for the Squad dataset, as loaded from disk. + + Args: + qas_id: The example's unique identifier + question_text: The question string + context_text: The context string + answer_text: The answer string + start_position_character: The character position of the start of the answer + title: The title of the example + answers: None by default, this is used during evaluation. Holds answers as well as their start positions. + is_impossible: False by default, set to True if the example has no possible answer. + """ + + def __init__( + self, + qas_id, + question_text, + context_text, + answer_text, + start_position_character, + title, + answers=[], + is_impossible=False, + ): + self.qas_id = qas_id + self.question_text = question_text + self.context_text = context_text + self.answer_text = answer_text + self.title = title + self.is_impossible = is_impossible + self.answers = answers + + self.start_position, self.end_position = 0, 0 + + doc_tokens = [] + char_to_word_offset = [] + prev_is_whitespace = True + + # Split on whitespace so that different tokens may be attributed to their original position. + for c in self.context_text: + if _is_whitespace(c): + prev_is_whitespace = True + else: + if prev_is_whitespace: + doc_tokens.append(c) + else: + doc_tokens[-1] += c + prev_is_whitespace = False + char_to_word_offset.append(len(doc_tokens) - 1) + + self.doc_tokens = doc_tokens + self.char_to_word_offset = char_to_word_offset + + # Start and end positions only has a value during evaluation. + if start_position_character is not None and not is_impossible: + self.start_position = char_to_word_offset[start_position_character] + self.end_position = char_to_word_offset[ + min(start_position_character + len(answer_text) - 1, len(char_to_word_offset) - 1) + ] + + +class SquadFeatures: + """ + Single squad example features to be fed to a model. 
Those features are model-specific and can be crafted from + [`~data.processors.squad.SquadExample`] using the + [`~transformers.data.processors.squad.squad_convert_examples_to_features`] method. + + Args: + input_ids: Indices of input sequence tokens in the vocabulary. + attention_mask: Mask to avoid performing attention on padding token indices. + token_type_ids: Segment token indices to indicate first and second portions of the inputs. + cls_index: the index of the CLS token.
+ p_mask: Mask identifying tokens that can be answers vs. tokens that cannot. + Mask with 1 for tokens that cannot be in the answer and 0 for tokens that can be in an answer + example_index: the index of the example + unique_id: The unique Feature identifier + paragraph_len: The length of the context + token_is_max_context: + List of booleans identifying which tokens have their maximum context in this feature object. If a token + does not have its maximum context in this feature object, it means that another feature object has more + information related to that token and should be prioritized over this feature for that token.
+ tokens: list of tokens corresponding to the input ids + token_to_orig_map: mapping between the tokens and the original text, needed in order to identify the answer. + start_position: start of the answer token index + end_position: end of the answer token index + encoding: optionally store the BatchEncoding with the fast-tokenizer alignment methods. + """ + + def __init__( + self, + input_ids, + attention_mask, + token_type_ids, + cls_index, + p_mask, + example_index, + unique_id, + paragraph_len, + token_is_max_context, + tokens, + token_to_orig_map, + start_position, + end_position, + is_impossible, + qas_id: str = None, + encoding: BatchEncoding = None, + ):
+ self.input_ids = input_ids + self.attention_mask = attention_mask + self.token_type_ids = token_type_ids + self.cls_index = cls_index + self.p_mask = p_mask + + self.example_index = example_index + self.unique_id = unique_id + self.paragraph_len = paragraph_len + self.token_is_max_context = token_is_max_context + self.tokens = tokens + self.token_to_orig_map = token_to_orig_map + + self.start_position = start_position + self.end_position = end_position + self.is_impossible = is_impossible + self.qas_id = qas_id + + self.encoding = encoding + +
+class SquadResult: + """ + Constructs a SquadResult which can be used to evaluate a model's output on the SQuAD dataset. + + Args: + unique_id: The unique identifier corresponding to that example. + start_logits: The logits corresponding to the start of the answer + end_logits: The logits corresponding to the end of the answer + """ + + def __init__(self, unique_id, start_logits, end_logits, start_top_index=None, end_top_index=None, cls_logits=None): + self.start_logits = start_logits + self.end_logits = end_logits + self.unique_id = unique_id + + if start_top_index: + self.start_top_index = start_top_index + self.end_top_index = end_top_index + self.cls_logits = cls_logits
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..936f5a51e9fcf4c4189eb444e567d761e8fa0865 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/utils.py @@ -0,0 +1,349 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import csv +import dataclasses +import json +from dataclasses import dataclass +from typing import List, Optional, Union + +from ...utils import is_tf_available, is_torch_available, logging + + +logger = logging.get_logger(__name__) + + +@dataclass +class InputExample: + """ + A single training/test example for simple sequence classification. + + Args: + guid: Unique id for the example. + text_a: string. The untokenized text of the first sequence. For single + sequence tasks, only this sequence must be specified. + text_b: (Optional) string. The untokenized text of the second sequence. + Only must be specified for sequence pair tasks. + label: (Optional) string. The label of the example. This should be + specified for train and dev examples, but not for test examples. + """ + + guid: str + text_a: str + text_b: Optional[str] = None + label: Optional[str] = None + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(dataclasses.asdict(self), indent=2) + "\n" + + +@dataclass(frozen=True) +class InputFeatures: + """ + A single set of features of data. Property names are the same names as the corresponding inputs to a model. + + Args: + input_ids: Indices of input sequence tokens in the vocabulary. + attention_mask: Mask to avoid performing attention on padding token indices. + Mask values selected in `[0, 1]`: Usually `1` for tokens that are NOT MASKED, `0` for MASKED (padded) + tokens. + token_type_ids: (Optional) Segment token indices to indicate first and second + portions of the inputs. Only some models use them. + label: (Optional) Label corresponding to the input. Int for classification problems, + float for regression problems. + """ + + input_ids: List[int] + attention_mask: Optional[List[int]] = None + token_type_ids: Optional[List[int]] = None + label: Optional[Union[int, float]] = None + + def to_json_string(self): + """Serializes this instance to a JSON string.""" + return json.dumps(dataclasses.asdict(self)) + "\n" + + +class DataProcessor: + """Base class for data converters for sequence classification data sets.""" + + def get_example_from_tensor_dict(self, tensor_dict): + """ + Gets an example from a dict with tensorflow tensors. + + Args: + tensor_dict: Keys and values should match the corresponding Glue + tensorflow_dataset examples. 
+ """ + raise NotImplementedError() + + def get_train_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the train set.""" + raise NotImplementedError() + + def get_dev_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the dev set.""" + raise NotImplementedError() + + def get_test_examples(self, data_dir): + """Gets a collection of [`InputExample`] for the test set.""" + raise NotImplementedError() + + def get_labels(self): + """Gets the list of labels for this data set.""" + raise NotImplementedError() + + def tfds_map(self, example): + """ + Some tensorflow_datasets datasets are not formatted the same way the GLUE datasets are. This method converts + examples to the correct format. + """ + if len(self.get_labels()) > 1: + example.label = self.get_labels()[int(example.label)] + return example + + @classmethod + def _read_tsv(cls, input_file, quotechar=None): + """Reads a tab separated value file.""" + with open(input_file, "r", encoding="utf-8-sig") as f: + return list(csv.reader(f, delimiter="\t", quotechar=quotechar)) + + +class SingleSentenceClassificationProcessor(DataProcessor): + """Generic processor for a single sentence classification data set.""" + + def __init__(self, labels=None, examples=None, mode="classification", verbose=False): + self.labels = [] if labels is None else labels + self.examples = [] if examples is None else examples + self.mode = mode + self.verbose = verbose + + def __len__(self): + return len(self.examples) + + def __getitem__(self, idx): + if isinstance(idx, slice): + return SingleSentenceClassificationProcessor(labels=self.labels, examples=self.examples[idx]) + return self.examples[idx] + + @classmethod + def create_from_csv( + cls, file_name, split_name="", column_label=0, column_text=1, column_id=None, skip_first_row=False, **kwargs + ): + processor = cls(**kwargs) + processor.add_examples_from_csv( + file_name, + split_name=split_name, + column_label=column_label, + column_text=column_text, + column_id=column_id, + skip_first_row=skip_first_row, + overwrite_labels=True, + overwrite_examples=True, + ) + return processor + + @classmethod + def create_from_examples(cls, texts_or_text_and_labels, labels=None, **kwargs): + processor = cls(**kwargs) + processor.add_examples(texts_or_text_and_labels, labels=labels) + return processor + + def add_examples_from_csv( + self, + file_name, + split_name="", + column_label=0, + column_text=1, + column_id=None, + skip_first_row=False, + overwrite_labels=False, + overwrite_examples=False, + ): + lines = self._read_tsv(file_name) + if skip_first_row: + lines = lines[1:] + texts = [] + labels = [] + ids = [] + for i, line in enumerate(lines): + texts.append(line[column_text]) + labels.append(line[column_label]) + if column_id is not None: + ids.append(line[column_id]) + else: + guid = f"{split_name}-{i}" if split_name else str(i) + ids.append(guid) + + return self.add_examples( + texts, labels, ids, overwrite_labels=overwrite_labels, overwrite_examples=overwrite_examples + ) + + def add_examples( + self, texts_or_text_and_labels, labels=None, ids=None, overwrite_labels=False, overwrite_examples=False + ): + if labels is not None and len(texts_or_text_and_labels) != len(labels): + raise ValueError( + f"Text and labels have mismatched lengths {len(texts_or_text_and_labels)} and {len(labels)}" + ) + if ids is not None and len(texts_or_text_and_labels) != len(ids): + raise ValueError(f"Text and ids have mismatched lengths {len(texts_or_text_and_labels)} and {len(ids)}") + if ids 
is None: + ids = [None] * len(texts_or_text_and_labels) + if labels is None: + labels = [None] * len(texts_or_text_and_labels) + examples = [] + added_labels = set() + for text_or_text_and_label, label, guid in zip(texts_or_text_and_labels, labels, ids): + if isinstance(text_or_text_and_label, (tuple, list)) and label is None: + text, label = text_or_text_and_label + else: + text = text_or_text_and_label + added_labels.add(label) + examples.append(InputExample(guid=guid, text_a=text, text_b=None, label=label)) + + # Update examples + if overwrite_examples: + self.examples = examples + else: + self.examples.extend(examples) + + # Update labels + if overwrite_labels: + self.labels = list(added_labels) + else: + self.labels = list(set(self.labels).union(added_labels)) + + return self.examples + + def get_features( + self, + tokenizer, + max_length=None, + pad_on_left=False, + pad_token=0, + mask_padding_with_zero=True, + return_tensors=None, + ): + """ + Convert examples in a list of `InputFeatures` + + Args: + tokenizer: Instance of a tokenizer that will tokenize the examples + max_length: Maximum example length + pad_on_left: If set to `True`, the examples will be padded on the left rather than on the right (default) + pad_token: Padding token + mask_padding_with_zero: If set to `True`, the attention mask will be filled by `1` for actual values + and by `0` for padded values. If set to `False`, inverts it (`1` for padded values, `0` for actual + values) + + Returns: + If the `examples` input is a `tf.data.Dataset`, will return a `tf.data.Dataset` containing the + task-specific features. If the input is a list of `InputExamples`, will return a list of task-specific + `InputFeatures` which can be fed to the model. + + """ + if max_length is None: + max_length = tokenizer.max_len + + label_map = {label: i for i, label in enumerate(self.labels)} + + all_input_ids = [] + for ex_index, example in enumerate(self.examples): + if ex_index % 10000 == 0: + logger.info(f"Tokenizing example {ex_index}") + + input_ids = tokenizer.encode( + example.text_a, + add_special_tokens=True, + max_length=min(max_length, tokenizer.max_len), + ) + all_input_ids.append(input_ids) + + batch_length = max(len(input_ids) for input_ids in all_input_ids) + + features = [] + for ex_index, (input_ids, example) in enumerate(zip(all_input_ids, self.examples)): + if ex_index % 10000 == 0: + logger.info(f"Writing example {ex_index}/{len(self.examples)}") + # The mask has 1 for real tokens and 0 for padding tokens. Only real + # tokens are attended to. + attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids) + + # Zero-pad up to the sequence length. 
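+            # For example, with mask_padding_with_zero=True, pad_token=0 and batch_length=6, an
+            # encoding input_ids=[101, 7592, 102] is padded on the right to [101, 7592, 102, 0, 0, 0]
+            # with attention_mask=[1, 1, 1, 0, 0, 0]; with pad_on_left=True the padding goes in
+            # front instead: [0, 0, 0, 101, 7592, 102] and [0, 0, 0, 1, 1, 1].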
+ padding_length = batch_length - len(input_ids) + if pad_on_left: + input_ids = ([pad_token] * padding_length) + input_ids + attention_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + attention_mask + else: + input_ids = input_ids + ([pad_token] * padding_length) + attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length) + + if len(input_ids) != batch_length: + raise ValueError(f"Error with input length {len(input_ids)} vs {batch_length}") + if len(attention_mask) != batch_length: + raise ValueError(f"Error with input length {len(attention_mask)} vs {batch_length}") + + if self.mode == "classification": + label = label_map[example.label] + elif self.mode == "regression": + label = float(example.label) + else: + raise ValueError(self.mode) + + if ex_index < 5 and self.verbose: + logger.info("*** Example ***") + logger.info(f"guid: {example.guid}") + logger.info(f"input_ids: {' '.join([str(x) for x in input_ids])}") + logger.info(f"attention_mask: {' '.join([str(x) for x in attention_mask])}") + logger.info(f"label: {example.label} (id = {label})") + + features.append(InputFeatures(input_ids=input_ids, attention_mask=attention_mask, label=label)) + + if return_tensors is None: + return features + elif return_tensors == "tf": + if not is_tf_available(): + raise RuntimeError("return_tensors set to 'tf' but TensorFlow 2.0 can't be imported") + import tensorflow as tf + + def gen(): + for ex in features: + yield ({"input_ids": ex.input_ids, "attention_mask": ex.attention_mask}, ex.label) + + dataset = tf.data.Dataset.from_generator( + gen, + ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64), + ({"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])}, tf.TensorShape([])), + ) + return dataset + elif return_tensors == "pt": + if not is_torch_available(): + raise RuntimeError("return_tensors set to 'pt' but PyTorch can't be imported") + import torch + from torch.utils.data import TensorDataset + + all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) + all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) + if self.mode == "classification": + all_labels = torch.tensor([f.label for f in features], dtype=torch.long) + elif self.mode == "regression": + all_labels = torch.tensor([f.label for f in features], dtype=torch.float) + + dataset = TensorDataset(all_input_ids, all_attention_mask, all_labels) + return dataset + else: + raise ValueError("return_tensors should be one of 'tf' or 'pt'") diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/xnli.py b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/xnli.py new file mode 100644 index 0000000000000000000000000000000000000000..3f1a11fcd6b4ef167fc77fb1cc6d9acbbadaccf0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/data/processors/xnli.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" XNLI utils (dataset loading and evaluation)""" + + +import os + +from ...utils import logging +from .utils import DataProcessor, InputExample + + +logger = logging.get_logger(__name__) + + +class XnliProcessor(DataProcessor): + """ + Processor for the XNLI dataset. Adapted from + https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/run_classifier.py#L207 + """ + + def __init__(self, language, train_language=None): + self.language = language + self.train_language = train_language + + def get_train_examples(self, data_dir): + """See base class.""" + lg = self.language if self.train_language is None else self.train_language + lines = self._read_tsv(os.path.join(data_dir, f"XNLI-MT-1.0/multinli/multinli.train.{lg}.tsv")) + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + guid = f"train-{i}" + text_a = line[0] + text_b = line[1] + label = "contradiction" if line[2] == "contradictory" else line[2] + if not isinstance(text_a, str): + raise ValueError(f"Training input {text_a} is not a string") + if not isinstance(text_b, str): + raise ValueError(f"Training input {text_b} is not a string") + if not isinstance(label, str): + raise ValueError(f"Training label {label} is not a string") + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_test_examples(self, data_dir): + """See base class.""" + lines = self._read_tsv(os.path.join(data_dir, "XNLI-1.0/xnli.test.tsv")) + examples = [] + for i, line in enumerate(lines): + if i == 0: + continue + language = line[0] + if language != self.language: + continue + guid = f"test-{i}" + text_a = line[6] + text_b = line[7] + label = line[1] + if not isinstance(text_a, str): + raise ValueError(f"Training input {text_a} is not a string") + if not isinstance(text_b, str): + raise ValueError(f"Training input {text_b} is not a string") + if not isinstance(label, str): + raise ValueError(f"Training label {label} is not a string") + examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) + return examples + + def get_labels(self): + """See base class.""" + return ["contradiction", "entailment", "neutral"] + + +xnli_processors = { + "xnli": XnliProcessor, +} + +xnli_output_modes = { + "xnli": "classification", +} + +xnli_tasks_num_labels = { + "xnli": 3, +} diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6653f3c8d123e9d0a2a48b5b7d97056861f45d15 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__init__.py @@ -0,0 +1,310 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ..utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available + + +_import_structure = { + "configuration_utils": ["GenerationConfig", "GenerationMode"], + "streamers": ["TextIteratorStreamer", "TextStreamer"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["beam_constraints"] = [ + "Constraint", + "ConstraintListState", + "DisjunctiveConstraint", + "PhrasalConstraint", + ] + _import_structure["beam_search"] = [ + "BeamHypotheses", + "BeamScorer", + "BeamSearchScorer", + "ConstrainedBeamSearchScorer", + ] + _import_structure["candidate_generator"] = [ + "AssistedCandidateGenerator", + "CandidateGenerator", + "PromptLookupCandidateGenerator", + ] + _import_structure["logits_process"] = [ + "AlternatingCodebooksLogitsProcessor", + "ClassifierFreeGuidanceLogitsProcessor", + "EncoderNoRepeatNGramLogitsProcessor", + "EncoderRepetitionPenaltyLogitsProcessor", + "EpsilonLogitsWarper", + "EtaLogitsWarper", + "ExponentialDecayLengthPenalty", + "ForcedBOSTokenLogitsProcessor", + "ForcedEOSTokenLogitsProcessor", + "ForceTokensLogitsProcessor", + "HammingDiversityLogitsProcessor", + "InfNanRemoveLogitsProcessor", + "LogitNormalization", + "LogitsProcessor", + "LogitsProcessorList", + "LogitsWarper", + "MinLengthLogitsProcessor", + "MinNewTokensLengthLogitsProcessor", + "NoBadWordsLogitsProcessor", + "NoRepeatNGramLogitsProcessor", + "PrefixConstrainedLogitsProcessor", + "RepetitionPenaltyLogitsProcessor", + "SequenceBiasLogitsProcessor", + "SuppressTokensLogitsProcessor", + "SuppressTokensAtBeginLogitsProcessor", + "TemperatureLogitsWarper", + "TopKLogitsWarper", + "TopPLogitsWarper", + "TypicalLogitsWarper", + "UnbatchedClassifierFreeGuidanceLogitsProcessor", + "WhisperTimeStampLogitsProcessor", + ] + _import_structure["stopping_criteria"] = [ + "MaxNewTokensCriteria", + "MaxLengthCriteria", + "MaxTimeCriteria", + "EosTokenCriteria", + "StoppingCriteria", + "StoppingCriteriaList", + "validate_stopping_criteria", + ] + _import_structure["utils"] = [ + "GenerationMixin", + "GreedySearchEncoderDecoderOutput", + "GreedySearchDecoderOnlyOutput", + "SampleEncoderDecoderOutput", + "SampleDecoderOnlyOutput", + "BeamSearchEncoderDecoderOutput", + "BeamSearchDecoderOnlyOutput", + "BeamSampleEncoderDecoderOutput", + "BeamSampleDecoderOnlyOutput", + "ContrastiveSearchEncoderDecoderOutput", + "ContrastiveSearchDecoderOnlyOutput", + "GenerateBeamDecoderOnlyOutput", + "GenerateBeamEncoderDecoderOutput", + "GenerateDecoderOnlyOutput", + "GenerateEncoderDecoderOutput", + ] + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tf_logits_process"] = [ + "TFForcedBOSTokenLogitsProcessor", + "TFForcedEOSTokenLogitsProcessor", + "TFForceTokensLogitsProcessor", + "TFLogitsProcessor", + "TFLogitsProcessorList", + "TFLogitsWarper", + "TFMinLengthLogitsProcessor", + "TFNoBadWordsLogitsProcessor", + 
"TFNoRepeatNGramLogitsProcessor", + "TFRepetitionPenaltyLogitsProcessor", + "TFSuppressTokensAtBeginLogitsProcessor", + "TFSuppressTokensLogitsProcessor", + "TFTemperatureLogitsWarper", + "TFTopKLogitsWarper", + "TFTopPLogitsWarper", + ] + _import_structure["tf_utils"] = [ + "TFGenerationMixin", + "TFGreedySearchDecoderOnlyOutput", + "TFGreedySearchEncoderDecoderOutput", + "TFSampleEncoderDecoderOutput", + "TFSampleDecoderOnlyOutput", + "TFBeamSearchEncoderDecoderOutput", + "TFBeamSearchDecoderOnlyOutput", + "TFBeamSampleEncoderDecoderOutput", + "TFBeamSampleDecoderOnlyOutput", + "TFContrastiveSearchEncoderDecoderOutput", + "TFContrastiveSearchDecoderOnlyOutput", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["flax_logits_process"] = [ + "FlaxForcedBOSTokenLogitsProcessor", + "FlaxForcedEOSTokenLogitsProcessor", + "FlaxForceTokensLogitsProcessor", + "FlaxLogitsProcessor", + "FlaxLogitsProcessorList", + "FlaxLogitsWarper", + "FlaxMinLengthLogitsProcessor", + "FlaxSuppressTokensAtBeginLogitsProcessor", + "FlaxSuppressTokensLogitsProcessor", + "FlaxTemperatureLogitsWarper", + "FlaxTopKLogitsWarper", + "FlaxTopPLogitsWarper", + "FlaxWhisperTimeStampLogitsProcessor", + "FlaxNoRepeatNGramLogitsProcessor", + ] + _import_structure["flax_utils"] = [ + "FlaxGenerationMixin", + "FlaxGreedySearchOutput", + "FlaxSampleOutput", + "FlaxBeamSearchOutput", + ] + +if TYPE_CHECKING: + from .configuration_utils import GenerationConfig, GenerationMode + from .streamers import TextIteratorStreamer, TextStreamer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .beam_constraints import Constraint, ConstraintListState, DisjunctiveConstraint, PhrasalConstraint + from .beam_search import BeamHypotheses, BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer + from .candidate_generator import AssistedCandidateGenerator, CandidateGenerator, PromptLookupCandidateGenerator + from .logits_process import ( + AlternatingCodebooksLogitsProcessor, + ClassifierFreeGuidanceLogitsProcessor, + EncoderNoRepeatNGramLogitsProcessor, + EncoderRepetitionPenaltyLogitsProcessor, + EpsilonLogitsWarper, + EtaLogitsWarper, + ExponentialDecayLengthPenalty, + ForcedBOSTokenLogitsProcessor, + ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, + HammingDiversityLogitsProcessor, + InfNanRemoveLogitsProcessor, + LogitNormalization, + LogitsProcessor, + LogitsProcessorList, + LogitsWarper, + MinLengthLogitsProcessor, + MinNewTokensLengthLogitsProcessor, + NoBadWordsLogitsProcessor, + NoRepeatNGramLogitsProcessor, + PrefixConstrainedLogitsProcessor, + RepetitionPenaltyLogitsProcessor, + SequenceBiasLogitsProcessor, + SuppressTokensAtBeginLogitsProcessor, + SuppressTokensLogitsProcessor, + TemperatureLogitsWarper, + TopKLogitsWarper, + TopPLogitsWarper, + TypicalLogitsWarper, + UnbatchedClassifierFreeGuidanceLogitsProcessor, + WhisperTimeStampLogitsProcessor, + ) + from .stopping_criteria import ( + EosTokenCriteria, + MaxLengthCriteria, + MaxNewTokensCriteria, + MaxTimeCriteria, + StoppingCriteria, + StoppingCriteriaList, + validate_stopping_criteria, + ) + from .utils import ( + BeamSampleDecoderOnlyOutput, + BeamSampleEncoderDecoderOutput, + BeamSearchDecoderOnlyOutput, + BeamSearchEncoderDecoderOutput, + ContrastiveSearchDecoderOnlyOutput, + ContrastiveSearchEncoderDecoderOutput, + GenerateBeamDecoderOnlyOutput, + 
GenerateBeamEncoderDecoderOutput, + GenerateDecoderOnlyOutput, + GenerateEncoderDecoderOutput, + GenerationMixin, + GreedySearchDecoderOnlyOutput, + GreedySearchEncoderDecoderOutput, + SampleDecoderOnlyOutput, + SampleEncoderDecoderOutput, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tf_logits_process import ( + TFForcedBOSTokenLogitsProcessor, + TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, + TFLogitsProcessor, + TFLogitsProcessorList, + TFLogitsWarper, + TFMinLengthLogitsProcessor, + TFNoBadWordsLogitsProcessor, + TFNoRepeatNGramLogitsProcessor, + TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, + TFTemperatureLogitsWarper, + TFTopKLogitsWarper, + TFTopPLogitsWarper, + ) + from .tf_utils import ( + TFBeamSampleDecoderOnlyOutput, + TFBeamSampleEncoderDecoderOutput, + TFBeamSearchDecoderOnlyOutput, + TFBeamSearchEncoderDecoderOutput, + TFContrastiveSearchDecoderOnlyOutput, + TFContrastiveSearchEncoderDecoderOutput, + TFGenerationMixin, + TFGreedySearchDecoderOnlyOutput, + TFGreedySearchEncoderDecoderOutput, + TFSampleDecoderOnlyOutput, + TFSampleEncoderDecoderOutput, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .flax_logits_process import ( + FlaxForcedBOSTokenLogitsProcessor, + FlaxForcedEOSTokenLogitsProcessor, + FlaxForceTokensLogitsProcessor, + FlaxLogitsProcessor, + FlaxLogitsProcessorList, + FlaxLogitsWarper, + FlaxMinLengthLogitsProcessor, + FlaxNoRepeatNGramLogitsProcessor, + FlaxSuppressTokensAtBeginLogitsProcessor, + FlaxSuppressTokensLogitsProcessor, + FlaxTemperatureLogitsWarper, + FlaxTopKLogitsWarper, + FlaxTopPLogitsWarper, + FlaxWhisperTimeStampLogitsProcessor, + ) + from .flax_utils import FlaxBeamSearchOutput, FlaxGenerationMixin, FlaxGreedySearchOutput, FlaxSampleOutput +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06f194ced622c77bd639dc2ee3fb7a1da8e9a275 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95f42f93427a306e16d56cb485dd85af54258a48 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_constraints.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61886c3c974914ccc2754d997d04a329b49eaa80 Binary files /dev/null and 
b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/beam_search.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4fb887a7766e6957923c4ed6e85d9c35d7b435b Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/candidate_generator.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc8776c5db5b5e16af31b8126c11552304d6da44 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/configuration_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d49320df06ab1a732275d9d96fc51e534dd8b9d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_logits_process.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f61a3002792b9dd6272ff8bd1ff0e80ce9ec8f35 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/flax_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7500eca49be37ea4b85438f10d3c0e7b53767382 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/logits_process.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1b39e97250a323220baecae330d59a56c1b671ae Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/stopping_criteria.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10ca5df79a91fa3330a3a91626e7979e926f4929 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/streamers.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be132979aa7772ceb07698da5118b878eeaf5b7a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_logits_process.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b57de78acf95b2df5b687961797295efbea38a0c Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/tf_utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47d767ac319518cca22221cb9e1ac1b5974e52b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/generation/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_constraints.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_constraints.py new file mode 100644 index 0000000000000000000000000000000000000000..b53c4512427a8793449da9f68c39a12527721d40 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_constraints.py @@ -0,0 +1,521 @@ +from abc import ABC, abstractmethod +from typing import List, Optional + + +class Constraint(ABC): + r"""Abstract base class for all constraints that can be applied during generation. + It must define how the constraint can be satisfied. + + All classes that inherit Constraint must follow the requirement that + + ```py + completed = False + while not completed: + _, completed = constraint.update(constraint.advance()) + ``` + + will always terminate (halt). + """ + + def __init__(self): + # test for the above condition + self.test() + + def test(self): + """ + Tests whether this constraint has been properly defined. + """ + counter = 0 + completed = False + while not completed: + if counter == 1: + self.reset() + advance = self.advance() + if not self.does_advance(advance): + raise Exception( + "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." + ) + + stepped, completed, reset = self.update(advance) + counter += 1 + + if counter > 10000: + raise Exception("update() does not fulfill the constraint.") + + if self.remaining() != 0: + raise Exception("Custom Constraint is not defined correctly.") + + @abstractmethod + def advance(self): + """ + When called, returns the token that would take this constraint one step closer to being fulfilled. + + Return: + token_ids(`torch.tensor`): Must be a tensor of a list of indexable tokens, not some integer. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + @abstractmethod + def does_advance(self, token_id: int): + """ + Reads in a token and returns whether it creates progress. 
+ """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + @abstractmethod + def update(self, token_id: int): + """ + Reads in a token and returns booleans that indicate the progress made by it. This function will update the + state of this object unlikes `does_advance(self, token_id: int)`. + + This isn't to test whether a certain token will advance the progress; it's to update its state as if it has + been generated. This becomes important if token_id != desired token (refer to else statement in + PhrasalConstraint) + + Args: + token_id(`int`): + The id of a newly generated token in the beam search. + Return: + stepped(`bool`): + Whether this constraint has become one step closer to being fulfuilled. + completed(`bool`): + Whether this constraint has been completely fulfilled by this token being generated. + reset (`bool`): + Whether this constraint has reset its progress by this token being generated. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + @abstractmethod + def reset(self): + """ + Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of + a constraint is abrupted by an unwanted token. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + @abstractmethod + def remaining(self): + """ + Returns the number of remaining steps of `advance()` in order to complete this constraint. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + @abstractmethod + def copy(self, stateful=False): + """ + Creates a new instance of this constraint. + + Args: + stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state. + + Return: + constraint(`Constraint`): The same constraint as the one being called from. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class PhrasalConstraint(Constraint): + r""" + [`Constraint`] enforcing that an ordered sequence of tokens is included in the output. + + Args: + token_ids (`List[int]`): + The id of the token that must be generated by the output. 
+ """ + + def __init__(self, token_ids: List[int]): + super(Constraint, self).__init__() + + if not isinstance(token_ids, list) or len(token_ids) == 0: + raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.") + if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids): + raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.") + + self.token_ids = token_ids + + self.seqlen = len(self.token_ids) + self.fulfilled_idx = -1 # the index of the currently fulfilled step + self.completed = False + + def advance(self): + if self.completed: + return None + return self.token_ids[self.fulfilled_idx + 1] + + def does_advance(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") + + if self.completed: + return False + + return token_id == self.token_ids[self.fulfilled_idx + 1] + + def update(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") + + stepped = False + completed = False + reset = False + + if self.does_advance(token_id): + self.fulfilled_idx += 1 + stepped = True + if self.fulfilled_idx == (self.seqlen - 1): + completed = True + self.completed = completed + else: + # failed to make progress. + reset = True + self.reset() + return stepped, completed, reset + + def reset(self): + self.completed = False + self.fulfilled_idx = 0 + + def remaining(self): + return self.seqlen - (self.fulfilled_idx + 1) + + def copy(self, stateful=False): + new_constraint = PhrasalConstraint(self.token_ids) + + if stateful: + new_constraint.seq_len = self.seqlen + new_constraint.fulfilled_idx = self.fulfilled_idx + new_constraint.completed = self.completed + + return new_constraint + + +class DisjunctiveTrie: + def __init__(self, nested_token_ids: List[List[int]], no_subsets=True): + r""" + A helper class that builds a trie with the words represented in `nested_token_ids`. + """ + self.max_height = max([len(one) for one in nested_token_ids]) + + root = {} + for token_ids in nested_token_ids: + level = root + for tidx, token_id in enumerate(token_ids): + if token_id not in level: + level[token_id] = {} + + level = level[token_id] + + if no_subsets and self.has_subsets(root, nested_token_ids): + raise ValueError( + "Each list in `nested_token_ids` can't be a complete subset of another list, but is" + f" {nested_token_ids}." + ) + + self.trie = root + + def next_tokens(self, current_seq): + """ + The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`. + """ + start = self.trie + + for current_token in current_seq: + start = start[current_token] + + next_tokens = list(start.keys()) + + return next_tokens + + def reached_leaf(self, current_seq): + next_tokens = self.next_tokens(current_seq) + + return len(next_tokens) == 0 + + def count_leaves(self, root): + next_nodes = list(root.values()) + if len(next_nodes) == 0: + return 1 + else: + return sum([self.count_leaves(nn) for nn in next_nodes]) + + def has_subsets(self, trie, nested_token_ids): + """ + Returns whether # of leaves == # of words. Otherwise some word is a subset of another. + """ + leaf_count = self.count_leaves(trie) + return len(nested_token_ids) != leaf_count + + +class DisjunctiveConstraint(Constraint): + r""" + A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints. 
+ + Args: + nested_token_ids (`List[List[int]]`): + A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from + the list of words. + """ + + def __init__(self, nested_token_ids: List[List[int]]): + super(Constraint, self).__init__() + + if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0: + raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") + if any(not isinstance(token_ids, list) for token_ids in nested_token_ids): + raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") + if any( + any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) + for token_ids in nested_token_ids + ): + raise ValueError( + f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." + ) + + self.trie = DisjunctiveTrie(nested_token_ids) + self.token_ids = nested_token_ids + + self.seqlen = self.trie.max_height + self.current_seq = [] + self.completed = False + + def advance(self): + token_list = self.trie.next_tokens(self.current_seq) + + if len(token_list) == 0: + return None + else: + return token_list + + def does_advance(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") + + next_tokens = self.trie.next_tokens(self.current_seq) + + return token_id in next_tokens + + def update(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") + + stepped = False + completed = False + reset = False + + if self.does_advance(token_id): + self.current_seq.append(token_id) + stepped = True + else: + reset = True + self.reset() + + completed = self.trie.reached_leaf(self.current_seq) + self.completed = completed + + return stepped, completed, reset + + def reset(self): + self.completed = False + self.current_seq = [] + + def remaining(self): + if self.completed: + # since this can be completed without reaching max height + return 0 + else: + return self.seqlen - len(self.current_seq) + + def copy(self, stateful=False): + new_constraint = DisjunctiveConstraint(self.token_ids) + + if stateful: + new_constraint.seq_len = self.seqlen + new_constraint.current_seq = self.current_seq + new_constraint.completed = self.completed + + return new_constraint + + +class ConstraintListState: + r""" + A class for beam scorers to track its progress through a list of constraints. + + Args: + constraints (`List[Constraint]`): + A list of [`Constraint`] objects that must be fulfilled by the beam scorer. 
+ """ + + def __init__(self, constraints: List[Constraint]): + self.constraints = constraints + + # max # of steps required to fulfill a given constraint + self.max_seqlen = max([c.seqlen for c in constraints]) + self.n_constraints = len(constraints) + self.completed = False + + self.init_state() + + def init_state(self): + self.complete_constraints = [] + self.inprogress_constraint = None + self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints] + + def get_bank(self): + add = 0 + if self.inprogress_constraint: + # extra points for having a constraint mid-fulfilled + add += self.max_seqlen - self.inprogress_constraint.remaining() + + return (len(self.complete_constraints) * self.max_seqlen) + add + + def advance(self): + """The list of tokens to generate such that we can make progress. + By "list" we don't mean the list of token that will fully fulfill a constraint. + + Given constraints `c_i = {t_ij | j == # of tokens}`, If we're not in the middle of progressing through a + specific constraint `c_i`, we return: + + `[t_k1 for k in indices of unfulfilled constraints]` + + If we are in the middle of a constraint, then we return: + `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint. + + Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, + that's the only one we'll return. + """ + token_list = [] + if self.inprogress_constraint is None: + for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" + advance = constraint.advance() + if isinstance(advance, int): + token_list.append(advance) + elif isinstance(advance, list): + token_list.extend(advance) + else: + advance = self.inprogress_constraint.advance() + if isinstance(advance, int): + token_list.append(advance) + elif isinstance(advance, list): + token_list.extend(advance) + + if len(token_list) == 0: + return None + else: + return token_list + + def reset(self, token_ids: Optional[List[int]]): + """ + token_ids: the tokens generated thus far to reset the state of the progress through constraints. + """ + self.init_state() + + if token_ids is not None: + for token in token_ids: + # completes or steps **one** constraint + complete, stepped = self.add(token) + + # the entire list of constraints are fulfilled + if self.completed: + break + + def add(self, token_id: int): + if not isinstance(token_id, int): + raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.") + + complete, stepped = False, False + + if self.completed: + complete = True + stepped = False + return complete, stepped + + if self.inprogress_constraint is not None: + # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current + # job, simply update the state + + stepped, complete, reset = self.inprogress_constraint.update(token_id) + if reset: + # 1. If the next token breaks the progress, then we must restart. + # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". + + # But that doesn't mean we self.init_state(), since we only reset the state for this particular + # constraint, not the full list of constraints. + + self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False)) + self.inprogress_constraint = None + + if complete: + # 2. If the next token completes the constraint, move it to completed list, set + # inprogress to None. 
If there are no pending constraints either, then this full list of constraints + # is complete. + + self.complete_constraints.append(self.inprogress_constraint) + self.inprogress_constraint = None + + if len(self.pending_constraints) == 0: + # we're done! + self.completed = True + + else: + # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list + # of constraints? + + for cidx, pending_constraint in enumerate(self.pending_constraints): + if pending_constraint.does_advance(token_id): + stepped, complete, reset = pending_constraint.update(token_id) + + if not stepped: + raise Exception( + "`constraint.update(token_id)` is not yielding incremental progress, " + "even though `constraint.does_advance(token_id)` is true." + ) + + if complete: + self.complete_constraints.append(pending_constraint) + self.inprogress_constraint = None + + if not complete and stepped: + self.inprogress_constraint = pending_constraint + + if complete or stepped: + # If we made any progress at all, then it's at least not a "pending constraint". + + self.pending_constraints = ( + self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] + ) + + if len(self.pending_constraints) == 0 and self.inprogress_constraint is None: + # If there's no longer any pending after this and no inprogress either, then we must be + # complete. + + self.completed = True + + break # prevent accidentally stepping through multiple constraints with just one token. + + return complete, stepped + + def copy(self, stateful=True): + new_state = ConstraintListState(self.constraints) # we actually never though self.constraints objects + # throughout this process. So it's at initialization state. + + if stateful: + new_state.complete_constraints = [ + constraint.copy(stateful=True) for constraint in self.complete_constraints + ] + if self.inprogress_constraint is not None: + new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True) + new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints] + + return new_state diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_search.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_search.py new file mode 100644 index 0000000000000000000000000000000000000000..5e73862e163ddfd53122420b88614d2818681e78 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/beam_search.py @@ -0,0 +1,1005 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from abc import ABC, abstractmethod +from collections import UserDict +from typing import Dict, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..utils import add_start_docstrings +from .beam_constraints import Constraint, ConstraintListState + + +PROCESS_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): + Current scores of the top `2 * num_beams` non-finished beam hypotheses. + next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. + next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + Beam indices indicating to which beam hypothesis the `next_tokens` correspond. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + beam_indices (`torch.LongTensor`, *optional*): + Beam indices indicating to which beam hypothesis each token correspond. + group_index (`int`, *optional*): + The index of the group of beams. Used with [`~PreTrainedModel.group_beam_search`]. + + Return: + `UserDict`: A dictionary composed of the fields as defined above: + + - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of all + non-finished beams. + - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be added + to the non-finished beam_hypotheses. + - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices + indicating to which beam the next tokens shall be added. + +""" + +FINALIZE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + final_beam_scores (`torch.FloatTensor` of shape `(batch_size * num_beams)`): + The final scores of all non-finished beams. + final_beam_tokens (`torch.FloatTensor` of shape `(batch_size * num_beams)`): + The last tokens to be added to the non-finished beam_hypotheses. + final_beam_indices (`torch.FloatTensor` of shape `(batch_size * num_beams)`): + The beam indices indicating to which beam the `final_beam_tokens` shall be added. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + + Return: + `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. + The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished early + due to the `eos_token_id`. 
+ +""" + + +class BeamScorer(ABC): + """ + Abstract base class for all beam scorers that are used for [`~PreTrainedModel.beam_search`] and + [`~PreTrainedModel.beam_sample`]. + """ + + @abstractmethod + @add_start_docstrings(PROCESS_INPUTS_DOCSTRING) + def process( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + **kwargs, + ) -> Tuple[torch.Tensor]: + raise NotImplementedError("This is an abstract method.") + + @abstractmethod + @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING) + def finalize( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + max_length: int, + **kwargs, + ) -> torch.LongTensor: + raise NotImplementedError("This is an abstract method.") + + +class BeamSearchScorer(BeamScorer): + r""" + [`BeamScorer`] implementing standard beam search decoding. + + Adapted in part from [Facebook's XLM beam search + code](https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529). + + Reference for the diverse beam search algorithm and implementation [Ashwin Kalyan's DBS + implementation](https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua) + + Args: + batch_size (`int`): + Batch Size of `input_ids` for which standard beam search decoding is run in parallel. + num_beams (`int`): + Number of beams for beam search. + device (`torch.device`): + Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be + allocated. + length_penalty (`float`, *optional*, defaults to 1.0): + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. + do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): + Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: + `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an + heuristic is applied and the generation stops when is it very unlikely to find better candidates; + `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical + beam search algorithm). + num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): + The number of beam hypotheses that shall be returned upon calling + [`~transformers.BeamSearchScorer.finalize`]. + num_beam_groups (`int`, *optional*, defaults to 1): + Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. + See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. + max_length (`int`, *optional*): + The maximum length of the sequence to be generated. 
+ """ + + def __init__( + self, + batch_size: int, + num_beams: int, + device: torch.device, + length_penalty: Optional[float] = 1.0, + do_early_stopping: Optional[Union[bool, str]] = False, + num_beam_hyps_to_keep: Optional[int] = 1, + num_beam_groups: Optional[int] = 1, + max_length: Optional[int] = None, + ): + self.num_beams = num_beams + self.device = device + self.length_penalty = length_penalty + self.do_early_stopping = do_early_stopping + self.num_beam_hyps_to_keep = num_beam_hyps_to_keep + self.num_beam_groups = num_beam_groups + self.group_size = self.num_beams // self.num_beam_groups + + self._is_init = False + # self._beam_hyps[i*self.num_beam_groups+j] is the beam_hyps of the j-th group in the i-th mini-batch. + # If group_beam_search is not used, the list consists of `batch_size` beam_hyps. + self._beam_hyps = [ + BeamHypotheses( + num_beams=self.group_size, + length_penalty=self.length_penalty, + early_stopping=self.do_early_stopping, + max_length=max_length, + ) + for _ in range(batch_size * self.num_beam_groups) + ] + # self._done[i*self.num_beam_groups+j] indicates whether the generation of the beam_hyps of the j-th group + # in the i-th mini-batch is complete. + self._done = torch.tensor( + [False for _ in range(batch_size * self.num_beam_groups)], dtype=torch.bool, device=self.device + ) + + if not isinstance(num_beams, int) or num_beams <= 1: + raise ValueError( + f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," + " one should make use of `greedy_search` instead." + ) + + if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): + raise ValueError( + "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" + f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." + ) + + @property + def is_done(self) -> bool: + return self._done.all() + + def process( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + group_index: Optional[int] = 0, + decoder_prompt_len: Optional[int] = 0, + ) -> Dict[str, torch.Tensor]: + # add up to the length which the next_scores is calculated on (including decoder prompt) + cur_len = input_ids.shape[-1] + 1 + batch_size = len(self._beam_hyps) // self.num_beam_groups + + if not (batch_size == (input_ids.shape[0] // self.group_size)): + if self.num_beam_groups > 1: + raise ValueError( + f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " + f"size of {self.group_size} is expected by the beam scorer." + ) + else: + raise ValueError( + f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " + f"{self.group_size} is expected by the beam scorer." 
+ ) + + device = input_ids.device + next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) + next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) + next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + for batch_idx in range(batch_size): + batch_group_idx = batch_idx * self.num_beam_groups + group_index + if self._done[batch_group_idx]: + if self.num_beams < len(self._beam_hyps[batch_group_idx]): + raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") + if eos_token_id is None or pad_token_id is None: + raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") + # pad the batch + next_beam_scores[batch_idx, :] = 0 + next_beam_tokens[batch_idx, :] = pad_token_id + next_beam_indices[batch_idx, :] = 0 + continue + + # next tokens for this sentence + beam_idx = 0 + for beam_token_rank, (next_token, next_score, next_index) in enumerate( + zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) + ): + batch_beam_idx = batch_idx * self.group_size + next_index + # add to generated hypotheses if end of sentence + if (eos_token_id is not None) and (next_token.item() in eos_token_id): + # if beam_token does not belong to top num_beams tokens, it should not be added + is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size + if is_beam_token_worse_than_top_num_beams: + continue + if beam_indices is not None: + beam_index = beam_indices[batch_beam_idx] + beam_index = beam_index + (batch_beam_idx,) + else: + beam_index = None + + self._beam_hyps[batch_group_idx].add( + input_ids[batch_beam_idx].clone(), + next_score.item(), + beam_indices=beam_index, + generated_len=cur_len - decoder_prompt_len, + ) + else: + # add next predicted token since it is not eos_token + next_beam_scores[batch_idx, beam_idx] = next_score + next_beam_tokens[batch_idx, beam_idx] = next_token + next_beam_indices[batch_idx, beam_idx] = batch_beam_idx + beam_idx += 1 + + # once the beam for next step is full, don't add more tokens to it. + if beam_idx == self.group_size: + break + + if beam_idx < self.group_size: + raise ValueError( + f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" + f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." 
+ ) + + # Check if we are done so that we can save a pad step if all(done) + self._done[batch_group_idx] = self._done[batch_group_idx] or self._beam_hyps[batch_group_idx].is_done( + next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len + ) + + return UserDict( + { + "next_beam_scores": next_beam_scores.view(-1), + "next_beam_tokens": next_beam_tokens.view(-1), + "next_beam_indices": next_beam_indices.view(-1), + } + ) + + def finalize( + self, + input_ids: torch.LongTensor, + final_beam_scores: torch.FloatTensor, + final_beam_tokens: torch.LongTensor, + final_beam_indices: torch.LongTensor, + max_length: int, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, + ) -> Tuple[torch.LongTensor]: + batch_size = len(self._beam_hyps) // self.num_beam_groups + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + # finalize all open beam hypotheses and add to generated hypotheses + for batch_group_idx, beam_hyp in enumerate(self._beam_hyps): + if self._done[batch_group_idx]: + continue + + # all open beam hypotheses are added to the beam hypothesis + # beam hypothesis class automatically keeps the best beams + for index_per_group in range(self.group_size): + batch_beam_idx = batch_group_idx * self.group_size + index_per_group + final_score = final_beam_scores[batch_beam_idx].item() + final_tokens = input_ids[batch_beam_idx] + beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None + generated_len = final_tokens.shape[-1] - decoder_prompt_len + beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len) + + # select the best hypotheses + sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) + best = [] + best_indices = [] + best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) + + # retrieve best hypotheses + for i in range(batch_size): + beam_hyps_in_batch = self._beam_hyps[i * self.num_beam_groups : (i + 1) * self.num_beam_groups] + candidate_beams = [beam for beam_hyp in beam_hyps_in_batch for beam in beam_hyp.beams] + sorted_hyps = sorted(candidate_beams, key=lambda x: x[0]) + for j in range(self.num_beam_hyps_to_keep): + best_hyp_tuple = sorted_hyps.pop() + best_score = best_hyp_tuple[0] + best_hyp = best_hyp_tuple[1] + best_index = best_hyp_tuple[2] + sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) + + # append hyp to lists + best.append(best_hyp) + + # append indices to list + best_indices.append(best_index) + + best_scores[i * self.num_beam_hyps_to_keep + j] = best_score + + # prepare for adding eos + sent_lengths_max = sent_lengths.max().item() + 1 + sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max + decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + + if len(best_indices) > 0 and best_indices[0] is not None: + indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + else: + indices = None + + # shorter batches are padded if needed + if sent_lengths.min().item() != sent_lengths.max().item(): + if pad_token_id is None: + raise ValueError("`pad_token_id` has to be defined") + decoded.fill_(pad_token_id) + + if indices is not None: + indices.fill_(-1) + + # fill with hypotheses and eos_token_id if the latter fits in + for i, (hypo, best_idx) in 
enumerate(zip(best, best_indices)): + decoded[i, : sent_lengths[i]] = hypo + + if indices is not None: + indices[i, : len(best_idx)] = torch.tensor(best_idx) + + if sent_lengths[i] < sent_max_len: + # inserting only the first eos_token_id + decoded[i, sent_lengths[i]] = eos_token_id[0] + + return UserDict( + { + "sequences": decoded, + "sequence_scores": best_scores, + "beam_indices": indices, + } + ) + + +class ConstrainedBeamSearchScorer(BeamScorer): + r""" + [`BeamScorer`] implementing constrained beam search decoding. + + + Args: + batch_size (`int`): + Batch Size of `input_ids` for which standard beam search decoding is run in parallel. + num_beams (`int`): + Number of beams for beam search. + constraints (`List[Constraint]`): + A list of positive constraints represented as `Constraint` objects that must be fulfilled in the generation + output. For more information, the documentation of [`Constraint`] should be read. + device (`torch.device`): + Defines the device type (*e.g.*, `"cpu"` or `"cuda"`) on which this instance of `BeamSearchScorer` will be + allocated. + length_penalty (`float`, *optional*, defaults to 1.0): + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. + do_early_stopping (`bool` or `str`, *optional*, defaults to `False`): + Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: + `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an + heuristic is applied and the generation stops when is it very unlikely to find better candidates; + `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical + beam search algorithm). + num_beam_hyps_to_keep (`int`, *optional*, defaults to 1): + The number of beam hypotheses that shall be returned upon calling + [`~transformers.BeamSearchScorer.finalize`]. + num_beam_groups (`int`, *optional*, defaults to 1): + Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. + See [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. + max_length (`int`, *optional*): + The maximum length of the sequence to be generated. 
+ """ + + def __init__( + self, + batch_size: int, + num_beams: int, + constraints: List[Constraint], + device: torch.device, + length_penalty: Optional[float] = 1.0, + do_early_stopping: Optional[Union[bool, str]] = False, + num_beam_hyps_to_keep: Optional[int] = 1, + num_beam_groups: Optional[int] = 1, + max_length: Optional[int] = None, + ): + self.num_beams = num_beams + self.device = device + self.length_penalty = length_penalty + self.do_early_stopping = do_early_stopping + self.num_beam_hyps_to_keep = num_beam_hyps_to_keep + self.num_beam_groups = num_beam_groups + self.group_size = self.num_beams // self.num_beam_groups + self.constraints = constraints + + self._is_init = False + self._beam_hyps = [ + BeamHypotheses( + num_beams=self.num_beams, + length_penalty=self.length_penalty, + early_stopping=self.do_early_stopping, + max_length=max_length, + ) + for _ in range(batch_size) + ] + self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device) + + if not isinstance(num_beams, int) or num_beams <= 1: + raise ValueError( + f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1," + " one should make use of `greedy_search` instead." + ) + + if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): + raise ValueError( + "`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` has to be" + f" divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." + ) + + @property + def is_done(self) -> bool: + return self._done.all() + + def make_constraint_states(self, n): + return [ConstraintListState([constraint.copy() for constraint in self.constraints]) for _ in range(n)] + + def check_completes_constraints(self, sequence): + new_state = self.make_constraint_states(1)[0] + new_state.reset(sequence) + return new_state.completed + + def process( + self, + input_ids: torch.LongTensor, + next_scores: torch.FloatTensor, + next_tokens: torch.LongTensor, + next_indices: torch.LongTensor, + scores_for_all_vocab: torch.FloatTensor, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, + ) -> Tuple[torch.Tensor]: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size * num_beams, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using any class inheriting from [`PreTrainedTokenizer`]. See + [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + next_scores (`torch.FloatTensor` of shape `(batch_size, 2 * num_beams)`): + Current scores of the top `2 * num_beams` non-finished beam hypotheses. + next_tokens (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + `input_ids` of the tokens corresponding to the top `2 * num_beams` non-finished beam hypotheses. + next_indices (`torch.LongTensor` of shape `(batch_size, 2 * num_beams)`): + Beam indices indicating to which beam hypothesis the `next_tokens` correspond. + scores_for_all_vocab (`torch.FloatTensor` of shape `(batch_size * num_beams, sequence_length)`): + The scores of all tokens in the vocabulary for each of the beam hypotheses. + pad_token_id (`int`, *optional*): + The id of the *padding* token. 
+ eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + beam_indices (`torch.LongTensor`, *optional*): + Beam indices indicating to which beam hypothesis each token correspond. + decoder_prompt_len (`int`, *optional*): + The length of prompt that is included in the input to decoder. + Return: + `UserDict`: A dictionary composed of the fields as defined above: + + - **next_beam_scores** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Updated scores of + all + non-finished beams. + + - **next_beam_tokens** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Next tokens to be + added + to the non-finished beam_hypotheses. + - **next_beam_indices** (`torch.FloatTensor` of shape `(batch_size * num_beams)`) -- Beam indices + indicating to which beam the next tokens shall be added. + """ + + # add up to the length which the next_scores is calculated on (including decoder prompt) + cur_len = input_ids.shape[-1] + 1 + batch_size = len(self._beam_hyps) + if not (batch_size == (input_ids.shape[0] // self.group_size)): + if self.num_beam_groups > 1: + raise ValueError( + f"A group beam size of {input_ids.shape[0]} is used as the input, but a group beam " + f"size of {self.group_size} is expected by the beam scorer." + ) + else: + raise ValueError( + f"A beam size of {input_ids.shape[0]} is used as the input, but a beam size of " + f"{self.group_size} is expected by the beam scorer." + ) + + device = input_ids.device + + next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) + next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) + next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + for batch_idx, beam_hyp in enumerate(self._beam_hyps): + if self._done[batch_idx]: + if self.num_beams < len(beam_hyp): + raise ValueError(f"Batch can only be done if at least {self.num_beams} beams have been generated") + if eos_token_id is None or pad_token_id is None: + raise ValueError("Generated beams >= num_beams -> eos_token_id and pad_token have to be defined") + # pad the batch + next_beam_scores[batch_idx, :] = 0 + next_beam_tokens[batch_idx, :] = pad_token_id + next_beam_indices[batch_idx, :] = 0 + continue + + # next tokens for this sentence. 
+ beam_idx = 0 + for beam_token_rank, (next_token, next_score, next_index) in enumerate( + zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) + ): + batch_beam_idx = batch_idx * self.group_size + next_index + # add to generated hypotheses if end of sentence + if (eos_token_id is not None) and (next_token.item() in eos_token_id): + # if beam_token does not belong to top num_beams tokens, it should not be added + is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size + if is_beam_token_worse_than_top_num_beams: + continue + + completes_constraint = self.check_completes_constraints(input_ids[batch_beam_idx].cpu().tolist()) + if completes_constraint: + if beam_indices is not None: + beam_index = beam_indices[batch_beam_idx] + beam_index = beam_index + (batch_beam_idx,) + else: + beam_index = None + + beam_hyp.add( + input_ids[batch_beam_idx].clone(), + next_score.item(), + beam_indices=beam_index, + generated_len=cur_len - decoder_prompt_len, + ) + else: + # add next predicted token since it is not eos_token + next_beam_scores[batch_idx, beam_idx] = next_score + next_beam_tokens[batch_idx, beam_idx] = next_token + next_beam_indices[batch_idx, beam_idx] = batch_beam_idx + beam_idx += 1 + + # once the beam for next step is full, don't add more tokens to it. + if beam_idx == self.group_size: + break + + new_scores, new_tokens, new_indices = self.step_sentence_constraint( + batch_idx, + input_ids, + scores_for_all_vocab, + next_beam_scores[batch_idx], + next_beam_tokens[batch_idx], + next_beam_indices[batch_idx], + ) + + next_beam_scores[batch_idx] = new_scores + next_beam_tokens[batch_idx] = new_tokens + next_beam_indices[batch_idx] = new_indices + + if beam_idx < self.group_size: + raise ValueError( + f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id:" + f" {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." + ) + + # Check if we are done so that we can save a pad step if all(done) + self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done( + next_scores[batch_idx].max().item(), cur_len, decoder_prompt_len + ) + + return UserDict( + { + "next_beam_scores": next_beam_scores.view(-1), + "next_beam_tokens": next_beam_tokens.view(-1), + "next_beam_indices": next_beam_indices.view(-1), + } + ) + + def step_sentence_constraint( + self, + batch_idx: int, + input_ids: torch.LongTensor, + vocab_scores: torch.FloatTensor, + sent_beam_scores: torch.FloatTensor, + sent_beam_tokens: torch.LongTensor, + sent_beam_indices: torch.LongTensor, + push_progress: bool = False, + ): + # sent_beam_tokens are the next {num_beams} number of tokens that are under consideration for this beam + # (candidate next tokens) + + # 1. Adding "advance_tokens" + # using ConstraintStateList.advance(), we propose new tokens to be added into this "candidate list" that will + # advance us in fulfilling the constraints. + + # 2. Selecting best candidates such that we end up with highest probable candidates + # that fulfill our constraints. 
+ + orig_len = sent_beam_indices.size(0) + device = sent_beam_indices.device + + # initialize states + topk_contraint_states = self.make_constraint_states(orig_len) + advance_constraint_states = self.make_constraint_states(orig_len) + + sidx, eidx = batch_idx * orig_len, (batch_idx + 1) * orig_len + this_batch_input_ids = input_ids[sidx:eidx] + this_batch_token_scores = vocab_scores[sidx:eidx] + full_hypotheses = torch.cat((input_ids[sent_beam_indices], sent_beam_tokens.unsqueeze(-1)), dim=-1) + + # need to make new hypothesis that advance the constraints + track_new = { + "new_seqs": full_hypotheses.tolist(), + "new_states": [], + "new_indices": [], + "new_tokens": [], + "new_scores": [], + } + for seq_idx, pre_seq in enumerate(this_batch_input_ids): + # pre_seq = ith sequence generated before this step. + + # input_ids -> (topk) generic beam search best model next tokens + # -> (advance) constraints forcing the next token + # either way, we need to sort them into "banks" later, so store a "ConstraintListState" for all types of + # hypotheses. + + topk_state = topk_contraint_states[seq_idx] + topk_state.reset(full_hypotheses[seq_idx].cpu().tolist()) + + advance_state = advance_constraint_states[seq_idx] + advance_state.reset(pre_seq.cpu().tolist()) + + if not advance_state.completed: + advance_tokens = torch.LongTensor(advance_state.advance()).to(device) + for advance_token in advance_tokens: + # since adding each `advance_token` leads to a different hypothesis, create new state instance. + new_state = advance_state.copy(stateful=True) + new_state.add(advance_token.cpu().tolist()) + + advance_seq = torch.cat((pre_seq, advance_token.unsqueeze(0)), -1).cpu().tolist() + if advance_seq not in track_new["new_seqs"]: + # prevent duplicates, which are basically bound to happen in this process. + track_new["new_seqs"].append(advance_seq) + track_new["new_indices"].append(sidx + seq_idx) # idx -> global idx across all the batches + track_new["new_tokens"].append(advance_token) + track_new["new_scores"].append(this_batch_token_scores[seq_idx].take(advance_token)) + track_new["new_states"].append(new_state) + elif push_progress: + # Basically, `sent_beam_indices` often chooses very little among `input_ids` the generated sequences that + # actually fulfill our constraints. For example, let constraints == ["loves pies"] and + + # pre_seq_1 = "The child loves pies and" pre_seq_2 = "The child plays in the playground and" + + # Without this step, if `sent_beam_indices` is something like [1,1], then + # 1. `pre_seq_1` won't be added to the list of (topk) hypothesis since it's not in the indices and + # 2. it won't be added to the list of (advance) hypothesis since it's completed already. (this is + # the else part of `if constraints_completed[seq_idx]`) + # 3. it ends up simply getting removed from consideration. + + # #3 might be fine and actually desired, since it's likely that it's a low-probability output anyways, + # especially if it's not in the list of `sent_beam_indices`. But this often leads to lengthened beam + # search times, since completed sequences keep getting removed after all this effort for constrained + # generation. + + # Here, we basically take `pre_seq_1` and to "push" it into the considered list of hypotheses, by simply + # appending the next likely token in the vocabulary and adding it to the list of hypotheses. 
+ + new_score, new_token = torch.max(this_batch_token_scores[seq_idx], 0) # some next probable token + advance_seq = torch.cat((pre_seq, new_token.unsqueeze(0)), -1) + + advance_state = advance_constraint_states[seq_idx] + + advance_seq = advance_seq.cpu().tolist() + + advance_state.reset(advance_seq) + if advance_seq not in track_new["new_seqs"]: + # but still don't want to have duplicates + track_new["new_seqs"].append(advance_seq) + track_new["new_indices"].append(seq_idx) + track_new["new_tokens"].append(new_token) + track_new["new_scores"].append(new_score) + track_new["new_states"].append(advance_state) + + if len(track_new["new_indices"]) > 0: + new_indices = torch.tensor(track_new["new_indices"]).to(device) + new_tokens = torch.stack(track_new["new_tokens"]).to(device) + new_scores = torch.stack(track_new["new_scores"]).to(device) + + all_states = topk_contraint_states + track_new["new_states"] + all_tokens = torch.cat((sent_beam_tokens, new_tokens), -1) + all_scores = torch.cat((sent_beam_scores, new_scores), -1) + all_banks = torch.tensor([one.get_bank() for one in all_states]).to(device) + + zipped = all_banks * 100 + all_scores + indices = zipped.sort(descending=True).indices + sorted_banks = all_banks[indices] + + # Then we end up with {sorted among bank C}, {sorted among bank C-1}, ..., {sorted among bank 0} + + counter = -1 + cur_bank = sorted_banks[0] + increments = [] + for bank in sorted_banks: + if bank == cur_bank: + counter += 1 + else: + counter = 0 + cur_bank = bank + increments.append(counter) + rearrangers = torch.tensor(np.argsort(increments, kind="mergesort")) + + indices = indices[rearrangers][:orig_len] + + sent_beam_scores = all_scores[indices] + sent_beam_tokens = all_tokens[indices] + sent_beam_indices = torch.cat((sent_beam_indices, new_indices))[indices] + + return sent_beam_scores, sent_beam_tokens, sent_beam_indices + + def finalize( + self, + input_ids: torch.LongTensor, + final_beam_scores: torch.FloatTensor, + final_beam_tokens: torch.LongTensor, + final_beam_indices: torch.LongTensor, + max_length: int, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + beam_indices: Optional[torch.LongTensor] = None, + decoder_prompt_len: Optional[int] = 0, + ) -> Tuple[torch.LongTensor]: + batch_size = len(self._beam_hyps) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + + # finalize all open beam hypotheses and add to generated hypotheses + for batch_idx, beam_hyp in enumerate(self._beam_hyps): + if self._done[batch_idx]: + continue + + # all open beam hypotheses are added to the beam hypothesis + # beam hypothesis class automatically keeps the best beams + + ids_collect = [] + for beam_id in range(self.num_beams): + batch_beam_idx = batch_idx * self.num_beams + beam_id + final_score = final_beam_scores[batch_beam_idx].item() + final_tokens = input_ids[batch_beam_idx] + + completes_constraint = self.check_completes_constraints(final_tokens.cpu().tolist()) + if completes_constraint: + beam_index = beam_indices[batch_beam_idx] if beam_indices is not None else None + generated_len = final_tokens.shape[-1] - decoder_prompt_len + beam_hyp.add(final_tokens, final_score, beam_indices=beam_index, generated_len=generated_len) + ids_collect.append(beam_id) + + # due to overly complex constraints or other factors, sometimes we can't gaurantee a successful + # generation. In these cases we simply return the highest scoring outputs. 
+ if len(ids_collect) < self.num_beam_hyps_to_keep: + for beam_id in range(self.num_beams): + if beam_id not in ids_collect: + batch_beam_idx = batch_idx * self.num_beams + beam_id + final_score = final_beam_scores[batch_beam_idx].item() + final_tokens = input_ids[batch_beam_idx] + generated_len = final_tokens.shape[-1] - decoder_prompt_len + beam_hyp.add(final_tokens, final_score, generated_len=generated_len) + if len(ids_collect) >= self.num_beam_hyps_to_keep: + break + + # select the best hypotheses + sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) + best = [] + best_indices = [] + best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) + + # retrieve best hypotheses + for i, beam_hyp in enumerate(self._beam_hyps): + sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0]) + for j in range(self.num_beam_hyps_to_keep): + best_hyp_tuple = sorted_hyps.pop() + best_score = best_hyp_tuple[0] + best_hyp = best_hyp_tuple[1] + best_index = best_hyp_tuple[2] + sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) + + # append to lists + best.append(best_hyp) + + # append indices to list + best_indices.append(best_index) + + best_scores[i * self.num_beam_hyps_to_keep + j] = best_score + + # prepare for adding eos + sent_lengths_max = sent_lengths.max().item() + 1 + + sent_max_len = min(sent_lengths_max, max_length) if max_length is not None else sent_lengths_max + decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + + if len(best_indices) > 0 and best_indices[0] is not None: + indices: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) + else: + indices = None + + # shorter batches are padded if needed + if sent_lengths.min().item() != sent_lengths.max().item(): + if pad_token_id is None: + raise ValueError("`pad_token_id` has to be defined") + decoded.fill_(pad_token_id) + + if indices is not None: + indices.fill_(-1) + + # fill with hypotheses and eos_token_id if the latter fits in + for i, (hypo, best_idx) in enumerate(zip(best, best_indices)): + decoded[i, : sent_lengths[i]] = hypo + + if indices is not None: + indices[i, : len(best_idx)] = torch.tensor(best_idx) + + if sent_lengths[i] < sent_max_len: + # inserting only the first eos_token_id + decoded[i, sent_lengths[i]] = eos_token_id[0] + + return UserDict( + { + "sequences": decoded, + "sequence_scores": best_scores, + "beam_indices": indices, + } + ) + + +class BeamHypotheses: + def __init__(self, num_beams: int, length_penalty: float, early_stopping: bool, max_length: Optional[int] = None): + """ + Initialize n-best list of hypotheses. + """ + self.length_penalty = length_penalty + self.early_stopping = early_stopping + self.max_length = max_length + self.num_beams = num_beams + self.beams = [] + self.worst_score = 1e9 + + if not isinstance(self.early_stopping, bool) and self.max_length is None: + raise ValueError( + "When `do_early_stopping` is set to a string, `max_length` must be defined. Ensure it is passed to the" + " BeamScorer class instance at initialization time." + ) + + def __len__(self): + """ + Number of hypotheses in the list. + """ + return len(self.beams) + + def add( + self, + hyp: torch.LongTensor, + sum_logprobs: float, + beam_indices: Optional[torch.LongTensor] = None, + generated_len: Optional[int] = None, + ): + """ + Add a new hypothesis to the list. 
+ """ + if generated_len is not None: + score = sum_logprobs / (generated_len**self.length_penalty) + # This 'else' case exists for retrocompatibility + else: + score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) + + if len(self) < self.num_beams or score > self.worst_score: + self.beams.append((score, hyp, beam_indices)) + if len(self) > self.num_beams: + sorted_next_scores = sorted([(s, idx) for idx, (s, _, _) in enumerate(self.beams)]) + del self.beams[sorted_next_scores[0][1]] + self.worst_score = sorted_next_scores[1][0] + else: + self.worst_score = min(score, self.worst_score) + + def is_done(self, best_sum_logprobs: float, cur_len: int, decoder_prompt_len: Optional[int] = 0) -> bool: + """ + If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst + one in the heap, then we are done with this sentence. + """ + + if len(self) < self.num_beams: + return False + + # `True`: stop as soon as at least `num_beams` hypotheses are finished + if self.early_stopping is True: + return True + # `False`: heuristic -- compute best possible score from `cur_len`, even though it is not entirely accurate + # when `length_penalty` is positive. See the discussion below for more details. + # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 + elif self.early_stopping is False: + highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty + ret = self.worst_score >= highest_attainable_score + return ret + # `"never"`: compute the best possible score, depending on the signal of `length_penalty` + else: + # `length_penalty` > 0.0 -> max denominator is obtaned from `max_length`, not from `cur_len` -> min + # abs(`highest_attainable_score`) is obtained -> `highest_attainable_score` is negative, hence we obtain + # its max this way + if self.length_penalty > 0.0: + if self.max_length <= decoder_prompt_len: + raise ValueError("max_length is not larger than decoder prompt length") + highest_attainable_score = ( + best_sum_logprobs / (self.max_length - decoder_prompt_len) ** self.length_penalty + ) + # the opposite logic applies here (max `highest_attainable_score` from `cur_len`) + else: + highest_attainable_score = best_sum_logprobs / (cur_len - decoder_prompt_len) ** self.length_penalty + ret = self.worst_score >= highest_attainable_score + return ret diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/candidate_generator.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/candidate_generator.py new file mode 100644 index 0000000000000000000000000000000000000000..6bd55c5f6b5109e49628fec8ebfc0b091be9db31 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/candidate_generator.py @@ -0,0 +1,425 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple + +import torch + +from ..cache_utils import DynamicCache + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + from .configuration_utils import GenerationConfig + from .logits_process import LogitsProcessorList + + +class CandidateGenerator: + """Abstract base class for all candidate generators that can be applied during assisted generation.""" + + def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: + """ + Fetches the candidates to be tried for the current input. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + + Return: + `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be + assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length, + vocabulary_size)` containing the logits associated to each candidate. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`." + ) + + def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): + """ + Updates the candidate generation strategy based on the outcomes. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + num_matches (`int`): + The number of matches between the candidate sequences and the model predictions. + """ + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can call " + "`update_candidate_strategy`." + ) + + +class AssistedCandidateGenerator(CandidateGenerator): + """ + `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates + candidates through the use of a smaller model. Read the following blog post for more information: + https://huggingface.co/blog/assisted-generation + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + assistant_model (`PreTrainedModel`): + The model to be used for generating candidates. This model should be smaller than the main model. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. + logits_processor (`LogitsProcessorList`): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + model_kwargs (`Dict`): + The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant + model as well. + inputs_tensor (`torch.Tensor`, *optional*): + The model input tensor. In encoder-decoder models, this is the encoder input. 
+ """ + + def __init__( + self, + input_ids: torch.LongTensor, + assistant_model: "PreTrainedModel", + generation_config: "GenerationConfig", + logits_processor: "LogitsProcessorList", + model_kwargs: Dict, + inputs_tensor: Optional[torch.Tensor] = None, + ): + # Make sure all data at the same device as assistant model + device = assistant_model.device + input_ids = input_ids.to(device) + if inputs_tensor is not None: + inputs_tensor = inputs_tensor.to(device) + + # Prepare the assistant and the starting number of candidate tokens + self.assistant_model = assistant_model + self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens + + # Prepare the kwargs for the assistant model + assistant_kwargs = {} + for key, value in model_kwargs.items(): # deepcopy crashes if we attempt to copy encoder outputs with grads + if key not in ("encoder_outputs", "assistant_encoder_outputs"): + assistant_kwargs[key] = ( + value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value) + ) + + if "assistant_encoder_outputs" in model_kwargs: + assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"] + elif assistant_model.config.is_encoder_decoder: + inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs( + inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs + ) + assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation( + inputs_tensor, assistant_kwargs, model_input_name + ) + elif "encoder_outputs" in model_kwargs: + assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"] + self.assistant_kwargs = assistant_kwargs + + # Prepare assistant model's keys of inputs + if assistant_model.config.is_encoder_decoder: + # both are encoder-decoder + self.input_ids_key = "decoder_input_ids" + elif "encoder_outputs" in assistant_kwargs: + # special case for encoder-decoder with decoder-only assistant (like DistilWhisper) + self.input_ids_key = "input_ids" + self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get( + "decoder_attention_mask", + torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long), + ) + else: + # both are decoder-only + self.input_ids_key = "input_ids" + + # Prepare generation-related options. + self.logits_processor = logits_processor + self.generation_config = copy.deepcopy(generation_config) + self.generation_config.return_dict_in_generate = True + self.generation_config.output_scores = True + + # avoid unnecessary warnings that min_length is larger than max_new_tokens + self.main_model_min_length = self.generation_config.min_length + self.generation_config.min_length = 0 + self.generation_config.min_new_tokens = None + + def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: + """ + Fetches the candidates to be tried for the current input. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + + Return: + `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be + assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length, + vocabulary_size)` containing the logits associated to each candidate. 
+ """ + input_ids = input_ids.to(self.assistant_model.device) + + # Don't generate more than `max_length - 1` candidates since the target model generates one extra token. + new_cur_len = input_ids.shape[-1] + max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1) + min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0) + if max_new_tokens == 0: + return input_ids, None + + # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length + # (which implicitly contains the number of accepted candidates from the previous round) + has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None + if has_past_key_values: + new_cache_size = new_cur_len - 1 + self.assistant_kwargs["past_key_values"] = _crop_past_key_values( + self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1 + ) # the assistant does not have the token after the last match, hence the -1 + + self.assistant_kwargs = _prepare_attention_mask( + self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder + ) + self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len) + + # 2. Forecast next N tokens using the assistant model. + assistant_generation_kwargs = { + self.input_ids_key: input_ids, + "min_new_tokens": min_new_tokens, + "max_new_tokens": max_new_tokens, + "generation_config": self.generation_config, + "logits_processor": self.logits_processor, + } + + assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs) + + # 3. Update variables for the next round of candidate generation + self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values + + # 4. Prepare variables for output + candidate_logits = torch.stack(assistant_output.scores, dim=1) + candidate_ids = assistant_output.sequences + return candidate_ids, candidate_logits + + def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): + """ + Updates the candidate generation strategy based on the outcomes. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + num_matches (`int`): + The number of matches between the candidate sequences and the model predictions. + """ + # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic, + # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the + # cost of forecasting incorrect assistant tokens. + if self.assistant_model.generation_config.num_assistant_tokens_schedule in { + "heuristic", + "heuristic_transient", + }: + if num_matches == int(self.num_assistant_tokens): + self.num_assistant_tokens += 2.0 + else: + self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0) + + +class PromptLookupCandidateGenerator(CandidateGenerator): + """ + `CandidateGenerator` class to be used for prompt lookup generation. 
This class generates candidates by looking up + likely continuations in the provided prompt (input_ids) itself. + Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding + + Args: + max_matching_ngram_size (`int`): + The maximum ngram size to be considered for matching in the prompt + num_output_tokens (`int`): + The number of tokens to be output as candidate tokens. + max_length (`int`): + The number of total maximum tokens that can be generated. For decoder-only models that includes the prompt length. + Defaults to 20, which is the max length used as default in generation config. + """ + + def __init__( + self, + num_output_tokens: int = 10, + max_matching_ngram_size: int = None, + max_length: int = 20, + ): + self.num_output_tokens = num_output_tokens + self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2 + self.max_length = max_length + + if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0: + raise ValueError("Invalid max_matching_ngram_size or num_output_tokens") + + def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]: + """ + Fetches the candidates to be tried for the current input. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + + Return: + `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried. + """ + input_length = input_ids.size(1) + + # Don't generate more than `max_length - 1` candidates since the target model generates one extra token. + if self.max_length == input_length + 1: + return input_ids, None + + chosen_ids = None + match_found = False + for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1): + # Create sliding windows of size ngram_size + windows = input_ids.unfold(dimension=1, size=ngram_size, step=1) + + # Convert ngram to a tensor for comparison + ngram_tensor = input_ids[0, -ngram_size:] + + # Find where the windows match the ngram + matches = (windows == ngram_tensor).all(dim=2) + + # Get the indices of matches + match_indices = matches.nonzero(as_tuple=True)[1] + + # Iterate through match indices to find a valid continuation + for idx in match_indices: + start_idx = idx + ngram_size + end_idx = start_idx + self.num_output_tokens + end_idx = min(end_idx, input_length, self.max_length) + + if start_idx < end_idx: + chosen_ids = input_ids[0, start_idx:end_idx] + match_found = True + break + if match_found: + break + + if chosen_ids is None or len(chosen_ids) == 0: + # In case we didn't find a match return the input sequence unchanged, reverts back to autoregressive decoding + return input_ids, None + + # Now need extend input_ids with chosen_ids + chosen_ids = chosen_ids.unsqueeze(0) + candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1) + # assisted_generation expects logits as well, but we don't have those here, so returning None + return candidate_input_ids, None + + def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int): + """ + Updates the candidate generation strategy based on the outcomes. + + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. 
[What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + num_matches (`int`): + The number of matches between the candidate sequences and the model predictions. + """ + # Currently does nothing + return + + +def _crop_past_key_values(model, past_key_values, maximum_length): + """Crops the past key values up to a certain maximum length.""" + new_past = [] + if model.config.is_encoder_decoder: + for idx in range(len(past_key_values)): + new_past.append( + ( + past_key_values[idx][0][:, :, :maximum_length, :], + past_key_values[idx][1][:, :, :maximum_length, :], + past_key_values[idx][2], + past_key_values[idx][3], + ) + ) + past_key_values = tuple(new_past) + # bloom is special + elif "bloom" in model.__class__.__name__.lower() or ( + model.config.architectures is not None and "bloom" in model.config.architectures[0].lower() + ): + for idx in range(len(past_key_values)): + new_past.append( + ( + past_key_values[idx][0][:, :, :maximum_length], + past_key_values[idx][1][:, :maximum_length, :], + ) + ) + past_key_values = tuple(new_past) + # gptbigcode is too + elif "gptbigcode" in model.__class__.__name__.lower() or ( + model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower() + ): + if model.config.multi_query: + for idx in range(len(past_key_values)): + past_key_values[idx] = past_key_values[idx][:, :maximum_length, :] + else: + for idx in range(len(past_key_values)): + past_key_values[idx] = past_key_values[idx][:, :, :maximum_length, :] + elif isinstance(past_key_values, DynamicCache): + for idx in range(len(past_key_values.key_cache)): + if past_key_values.value_cache[idx].shape[-1] != 0: + past_key_values.key_cache[idx] = past_key_values.key_cache[idx][:, :, :maximum_length, :] + past_key_values.value_cache[idx] = past_key_values.value_cache[idx][:, :, :maximum_length, :] + + elif past_key_values is not None: + for idx in range(len(past_key_values)): + new_past.append( + ( + past_key_values[idx][0][:, :, :maximum_length, :], + past_key_values[idx][1][:, :, :maximum_length, :], + ) + ) + past_key_values = tuple(new_past) + return past_key_values + + +def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]: + """Expands or crops the model's mask for decoding purposes, to the defined length""" + + mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask" + if mask_key not in model_kwargs: + return model_kwargs + + mask = model_kwargs[mask_key] + mask_length_diff = new_length - mask.shape[1] + + if mask_length_diff < 0: + model_kwargs[mask_key] = mask[:, :mask_length_diff] + elif mask_length_diff > 0: + model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1) + return model_kwargs + + +def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]: + """Expands or crops the model's token_type_ids for decoding purposes, to the defined length""" + if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None: + return model_kwargs + + token_type_ids = model_kwargs["token_type_ids"] + final_token_type = token_type_ids[:, -1].unsqueeze(-1) + type_length_diff = new_length - token_type_ids.shape[1] + + if type_length_diff < 
0: + token_type_ids = token_type_ids[:, :type_length_diff] + elif type_length_diff > 0: + token_type_copies = final_token_type.repeat(1, type_length_diff) + model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1) + return model_kwargs diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/configuration_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/configuration_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f40960c213ea67372c9200e0bdfc77ea94892b26 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/configuration_utils.py @@ -0,0 +1,1092 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" Generation configuration class and utilities.""" + +import copy +import json +import os +import warnings +from typing import TYPE_CHECKING, Any, Dict, Optional, Union + +from .. import __version__ +from ..configuration_utils import PretrainedConfig +from ..utils import ( + GENERATION_CONFIG_NAME, + ExplicitEnum, + PushToHubMixin, + cached_file, + download_url, + extract_commit_hash, + is_remote_url, + logging, +) + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + + +logger = logging.get_logger(__name__) +METADATA_FIELDS = ("_from_model_config", "_commit_hash", "_original_object_hash", "transformers_version") + + +class GenerationMode(ExplicitEnum): + """ + Possible generation modes, downstream of the [`~generation.GenerationMixin.generate`] method. + """ + + # Non-beam methods + CONTRASTIVE_SEARCH = "contrastive_search" + GREEDY_SEARCH = "greedy_search" + SAMPLE = "sample" + ASSISTED_GENERATION = "assisted_generation" + # Beam methods + BEAM_SEARCH = "beam_search" + BEAM_SAMPLE = "beam_sample" + CONSTRAINED_BEAM_SEARCH = "constrained_beam_search" + GROUP_BEAM_SEARCH = "group_beam_search" + + +class GenerationConfig(PushToHubMixin): + # no-format + r""" + Class that holds a configuration for a generation task. 
A `generate` call supports the following generation methods + for text-decoder, text-to-text, speech-to-text, and vision-to-text models: + + - *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and + `do_sample=False` + - *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0.` + and `top_k>1` + - *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and + `do_sample=False` + - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if + `num_beams>1` and `do_sample=True` + - *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if + `num_beams>1` and `num_beam_groups>1` + - *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if + `constraints!=None` or `force_words_ids!=None` + - *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if + `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()` + + You do not need to call any of the above methods directly. Pass custom parameter values to '.generate()'. To learn + more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). + + + + A large number of these flags control the logits or the stopping criteria of the generation. Make sure you check + the [generate-related classes](https://huggingface.co/docs/transformers/internal/generation_utils) for a full + description of the possible manipulations, as well as examples of their usage. + + + + Arg: + > Parameters that control the length of the output + + max_length (`int`, *optional*, defaults to 20): + The maximum length the generated tokens can have. Corresponds to the length of the input prompt + + `max_new_tokens`. Its effect is overridden by `max_new_tokens`, if also set. + max_new_tokens (`int`, *optional*): + The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. + min_length (`int`, *optional*, defaults to 0): + The minimum length of the sequence to be generated. Corresponds to the length of the input prompt + + `min_new_tokens`. Its effect is overridden by `min_new_tokens`, if also set. + min_new_tokens (`int`, *optional*): + The minimum numbers of tokens to generate, ignoring the number of tokens in the prompt. + early_stopping (`bool` or `str`, *optional*, defaults to `False`): + Controls the stopping condition for beam-based methods, like beam-search. It accepts the following values: + `True`, where the generation stops as soon as there are `num_beams` complete candidates; `False`, where an + heuristic is applied and the generation stops when is it very unlikely to find better candidates; + `"never"`, where the beam search procedure only stops when there cannot be better candidates (canonical + beam search algorithm). + max_time(`float`, *optional*): + The maximum amount of time you allow the computation to run for in seconds. generation will still finish + the current pass after allocated time has been passed. + + > Parameters that control the generation strategy used + + do_sample (`bool`, *optional*, defaults to `False`): + Whether or not to use sampling ; use greedy decoding otherwise. + num_beams (`int`, *optional*, defaults to 1): + Number of beams for beam search. 
1 means no beam search. + num_beam_groups (`int`, *optional*, defaults to 1): + Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. + [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. + penalty_alpha (`float`, *optional*): + The values balance the model confidence and the degeneration penalty in contrastive search decoding. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should use the past last key/values attentions (if applicable to the model) to + speed up decoding. + + > Parameters for manipulation of the model output logits + + temperature (`float`, *optional*, defaults to 1.0): + The value used to modulate the next token probabilities. + top_k (`int`, *optional*, defaults to 50): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + top_p (`float`, *optional*, defaults to 1.0): + If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to + `top_p` or higher are kept for generation. + typical_p (`float`, *optional*, defaults to 1.0): + Local typicality measures how similar the conditional probability of predicting a target token next is to + the expected conditional probability of predicting a random token next, given the partial text already + generated. If set to float < 1, the smallest set of the most locally typical tokens with probabilities that + add up to `typical_p` or higher are kept for generation. See [this + paper](https://arxiv.org/pdf/2202.00666.pdf) for more details. + epsilon_cutoff (`float`, *optional*, defaults to 0.0): + If set to float strictly between 0 and 1, only tokens with a conditional probability greater than + `epsilon_cutoff` will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on the + size of the model. See [Truncation Sampling as Language Model + Desmoothing](https://arxiv.org/abs/2210.15191) for more details. + eta_cutoff (`float`, *optional*, defaults to 0.0): + Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly between + 0 and 1, a token is only considered if it is greater than either `eta_cutoff` or `sqrt(eta_cutoff) * + exp(-entropy(softmax(next_token_logits)))`. The latter term is intuitively the expected next token + probability, scaled by `sqrt(eta_cutoff)`. In the paper, suggested values range from 3e-4 to 2e-3, + depending on the size of the model. See [Truncation Sampling as Language Model + Desmoothing](https://arxiv.org/abs/2210.15191) for more details. + diversity_penalty (`float`, *optional*, defaults to 0.0): + This value is subtracted from a beam's score if it generates a token same as any beam from other group at a + particular time. Note that `diversity_penalty` is only effective if `group beam search` is enabled. + repetition_penalty (`float`, *optional*, defaults to 1.0): + The parameter for repetition penalty. 1.0 means no penalty. See [this + paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. + encoder_repetition_penalty (`float`, *optional*, defaults to 1.0): + The paramater for encoder_repetition_penalty. An exponential penalty on sequences that are not in the + original input. 1.0 means no penalty. + length_penalty (`float`, *optional*, defaults to 1.0): + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to + the sequence length, which in turn is used to divide the score of the sequence. 
Since the score is the log + likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while + `length_penalty` < 0.0 encourages shorter sequences. + no_repeat_ngram_size (`int`, *optional*, defaults to 0): + If set to int > 0, all ngrams of that size can only occur once. + bad_words_ids(`List[List[int]]`, *optional*): + List of list of token ids that are not allowed to be generated. Check + [`~generation.NoBadWordsLogitsProcessor`] for further documentation and examples. + force_words_ids(`List[List[int]]` or `List[List[List[int]]]`, *optional*): + List of token ids that must be generated. If given a `List[List[int]]`, this is treated as a simple list of + words that must be included, the opposite to `bad_words_ids`. If given `List[List[List[int]]]`, this + triggers a [disjunctive constraint](https://github.com/huggingface/transformers/issues/14081), where one + can allow different forms of each word. + renormalize_logits (`bool`, *optional*, defaults to `False`): + Whether to renormalize the logits after applying all the logits processors or warpers (including the custom + ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the score logits + are normalized but some logit processors or warpers break the normalization. + constraints (`List[Constraint]`, *optional*): + Custom constraints that can be added to the generation to ensure that the output will contain the use of + certain tokens as defined by `Constraint` objects, in the most sensible way possible. + forced_bos_token_id (`int`, *optional*, defaults to `model.config.forced_bos_token_id`): + The id of the token to force as the first generated token after the `decoder_start_token_id`. Useful for + multilingual models like [mBART](../model_doc/mbart) where the first generated token needs to be the target + language token. + forced_eos_token_id (`Union[int, List[int]]`, *optional*, defaults to `model.config.forced_eos_token_id`): + The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a + list to set multiple *end-of-sequence* tokens. + remove_invalid_values (`bool`, *optional*, defaults to `model.config.remove_invalid_values`): + Whether to remove possible *nan* and *inf* outputs of the model to prevent the generation method to crash. + Note that using `remove_invalid_values` can slow down generation. + exponential_decay_length_penalty (`tuple(int, float)`, *optional*): + This Tuple adds an exponentially increasing length penalty, after a certain amount of tokens have been + generated. The tuple shall consist of: `(start_index, decay_factor)` where `start_index` indicates where + penalty starts and `decay_factor` represents the factor of exponential decay + suppress_tokens (`List[int]`, *optional*): + A list of tokens that will be suppressed at generation. The `SupressTokens` logit processor will set their + log probs to `-inf` so that they are not sampled. + begin_suppress_tokens (`List[int]`, *optional*): + A list of tokens that will be suppressed at the beginning of the generation. The `SupressBeginTokens` logit + processor will set their log probs to `-inf` so that they are not sampled. + forced_decoder_ids (`List[List[int]]`, *optional*): + A list of pairs of integers which indicates a mapping from generation indices to token indices that will be + forced before sampling. For example, `[[1, 123]]` means the second generated token will always be a token + of index 123. 
+ sequence_bias (`Dict[Tuple[int], float]`, *optional*)): + Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the + sequence being selected, while negative biases do the opposite. Check + [`~generation.SequenceBiasLogitsProcessor`] for further documentation and examples. + guidance_scale (`float`, *optional*): + The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`. + Higher guidance scale encourages the model to generate samples that are more closely linked to the input + prompt, usually at the expense of poorer quality. + low_memory (`bool`, *optional*): + Switch to sequential beam search and sequential topk for contrastive search to reduce peak memory. + Used with beam search and contrastive search. + + + > Parameters that define the output variables of `generate` + + num_return_sequences(`int`, *optional*, defaults to 1): + The number of independently computed returned sequences for each element in the batch. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*): + Whether or not to return the unprocessed prediction logit scores. See `logits` under returned tensors for + more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + + > Special tokens that can be used at generation time + + pad_token_id (`int`, *optional*): + The id of the *padding* token. + bos_token_id (`int`, *optional*): + The id of the *beginning-of-sequence* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + + > Generation parameters exclusive to encoder-decoder models + + encoder_no_repeat_ngram_size (`int`, *optional*, defaults to 0): + If set to int > 0, all ngrams of that size that occur in the `encoder_input_ids` cannot occur in the + `decoder_input_ids`. + decoder_start_token_id (`Union[int, List[int]]`, *optional*): + If an encoder-decoder model starts decoding with a different token than *bos*, the id of that token or a list of length + `batch_size`. Indicating a list enables different start ids for each element in the batch + (e.g. multilingual models with different target languages in one batch) + + + > Generation parameters exclusive to [assistant generation](https://arxiv.org/abs/2211.17192) + + num_assistant_tokens (`int`, *optional*, defaults to 5): + Defines the number of _speculative tokens_ that shall be generated by the assistant model before being + checked by the target model at each iteration. Higher values for `num_assistant_tokens` make the generation + more _speculative_ : If the assistant model is performant larger speed-ups can be reached, if the assistant + model requires lots of corrections, lower speed-ups are reached. 
+ + num_assistant_tokens_schedule (`str`, *optional*, defaults to `"heuristic"`): + Defines the schedule at which max assistant tokens shall be changed during inference. + - `"heuristic"`: When all speculative tokens are correct, increase `num_assistant_tokens` by 2 else + reduce by 1. `num_assistant_tokens` value is persistent over multiple generation calls with the same assistant model. + - `"heuristic_transient"`: Same as `"heuristic"` but `num_assistant_tokens` is reset to its initial value after each generation call. + - `"constant"`: `num_assistant_tokens` stays unchanged during generation + + prompt_lookup_num_tokens (`int`, *optional*, default to `None`): + The number of tokens to be output as candidate tokens. + + max_matching_ngram_size (`int`, *optional*, default to `None`): + The maximum ngram size to be considered for matching in the prompt. Default to 2 if not provided. + + > Parameters specific to the caching mechanism: + + cache_implementation (`str`, *optional*, default to `None`): + Cache class that should be used when generating. + + + > Wild card + + generation_kwargs: + Additional generation kwargs will be forwarded to the `generate` function of the model. Kwargs that are not + present in `generate`'s signature will be used in the model forward pass. + """ + + def __init__(self, **kwargs): + # Parameters that control the length of the output + self.max_length = kwargs.pop("max_length", 20) + self.max_new_tokens = kwargs.pop("max_new_tokens", None) + self.min_length = kwargs.pop("min_length", 0) + self.min_new_tokens = kwargs.pop("min_new_tokens", None) + self.early_stopping = kwargs.pop("early_stopping", False) + self.max_time = kwargs.pop("max_time", None) + + # Parameters that control the generation strategy used + self.do_sample = kwargs.pop("do_sample", False) + self.num_beams = kwargs.pop("num_beams", 1) + self.num_beam_groups = kwargs.pop("num_beam_groups", 1) + self.penalty_alpha = kwargs.pop("penalty_alpha", None) + self.use_cache = kwargs.pop("use_cache", True) + + # Parameters for manipulation of the model output logits + self.temperature = kwargs.pop("temperature", 1.0) + self.top_k = kwargs.pop("top_k", 50) + self.top_p = kwargs.pop("top_p", 1.0) + self.typical_p = kwargs.pop("typical_p", 1.0) + self.epsilon_cutoff = kwargs.pop("epsilon_cutoff", 0.0) + self.eta_cutoff = kwargs.pop("eta_cutoff", 0.0) + self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0) + self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0) + self.encoder_repetition_penalty = kwargs.pop("encoder_repetition_penalty", 1.0) + self.length_penalty = kwargs.pop("length_penalty", 1.0) + self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0) + self.bad_words_ids = kwargs.pop("bad_words_ids", None) + self.force_words_ids = kwargs.pop("force_words_ids", None) + self.renormalize_logits = kwargs.pop("renormalize_logits", False) + self.constraints = kwargs.pop("constraints", None) + self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None) + self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None) + self.remove_invalid_values = kwargs.pop("remove_invalid_values", False) + self.exponential_decay_length_penalty = kwargs.pop("exponential_decay_length_penalty", None) + self.suppress_tokens = kwargs.pop("suppress_tokens", None) + self.begin_suppress_tokens = kwargs.pop("begin_suppress_tokens", None) + self.forced_decoder_ids = kwargs.pop("forced_decoder_ids", None) + self.sequence_bias = kwargs.pop("sequence_bias", None) + self.guidance_scale = 
kwargs.pop("guidance_scale", None) + self.low_memory = kwargs.pop("low_memory", None) + + # Parameters that define the output variables of `generate` + self.num_return_sequences = kwargs.pop("num_return_sequences", 1) + self.output_attentions = kwargs.pop("output_attentions", False) + self.output_hidden_states = kwargs.pop("output_hidden_states", False) + self.output_scores = kwargs.pop("output_scores", False) + self.output_logits = kwargs.pop("output_logits", None) + self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False) + + # Special tokens that can be used at generation time + self.pad_token_id = kwargs.pop("pad_token_id", None) + self.bos_token_id = kwargs.pop("bos_token_id", None) + self.eos_token_id = kwargs.pop("eos_token_id", None) + + # Generation parameters exclusive to encoder-decoder models + self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0) + self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None) + + # Assistant generation + self.num_assistant_tokens = kwargs.pop("num_assistant_tokens", 5) + self.num_assistant_tokens_schedule = kwargs.pop("num_assistant_tokens_schedule", "heuristic") + + # Cache implementation + self.cache_implementation = kwargs.pop("cache_implementation", None) + + # Prompt lookup decoding + self.prompt_lookup_num_tokens = kwargs.pop("prompt_lookup_num_tokens", None) + self.max_matching_ngram_size = kwargs.pop("max_matching_ngram_size", None) + + # Wild card + self.generation_kwargs = kwargs.pop("generation_kwargs", {}) + + # The remaining attributes do not parametrize `.generate()`, but are informative and/or used by the hub + # interface. + self._from_model_config = kwargs.pop("_from_model_config", False) + self._commit_hash = kwargs.pop("_commit_hash", None) + self.transformers_version = kwargs.pop("transformers_version", __version__) + + # Additional attributes without default values + if not self._from_model_config: + # we don't want to copy values from the model config if we're initializing a `GenerationConfig` from a + # model's default configuration file + for key, value in kwargs.items(): + try: + setattr(self, key, value) + except AttributeError as err: + logger.error(f"Can't set {key} with value {value} for {self}") + raise err + + # Validate the values of the attributes + self.validate(is_init=True) + + def __hash__(self): + return hash(self.to_json_string(ignore_metadata=True)) + + def __eq__(self, other): + if not isinstance(other, GenerationConfig): + return False + + self_without_metadata = self.to_json_string(use_diff=False, ignore_metadata=True) + other_without_metadata = other.to_json_string(use_diff=False, ignore_metadata=True) + return self_without_metadata == other_without_metadata + + def __repr__(self): + return f"{self.__class__.__name__} {self.to_json_string(ignore_metadata=True)}" + + def get_generation_mode(self, assistant_model: Optional["PreTrainedModel"] = None) -> GenerationMode: + """ + Returns the generation mode triggered by the [`GenerationConfig`] instance. + + Arg: + assistant_model (`PreTrainedModel`, *optional*): + The assistant model to be used for assisted generation. If set, the generation mode will be + assisted generation. + + Returns: + `GenerationMode`: The generation mode triggered by the instance. + """ + # TODO joao: find out a way of not depending on external fields (e.g. 
`assistant_model`), then make this a + # property and part of the `__repr__` + if self.constraints is not None or self.force_words_ids is not None: + generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH + elif self.num_beams == 1: + if self.do_sample is False: + if ( + self.top_k is not None + and self.top_k > 1 + and self.penalty_alpha is not None + and self.penalty_alpha > 0 + ): + generation_mode = GenerationMode.CONTRASTIVE_SEARCH + else: + generation_mode = GenerationMode.GREEDY_SEARCH + else: + generation_mode = GenerationMode.SAMPLE + else: + if self.num_beam_groups > 1: + generation_mode = GenerationMode.GROUP_BEAM_SEARCH + elif self.do_sample is True: + generation_mode = GenerationMode.BEAM_SAMPLE + else: + generation_mode = GenerationMode.BEAM_SEARCH + + # Assisted generation may extend some generation modes + if assistant_model is not None or self.prompt_lookup_num_tokens is not None: + if generation_mode in ("greedy_search", "sample"): + generation_mode = GenerationMode.ASSISTED_GENERATION + else: + raise ValueError( + "You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate " + "is only supported with Greedy Search and Sample." + ) + return generation_mode + + def validate(self, is_init=False): + """ + Validates the values of the attributes of the [`GenerationConfig`] instance. Raises exceptions in the presence + of parameterization that can be detected as incorrect from the configuration instance alone. + + Note that some parameters not validated here are best validated at generate runtime, as they may depend on + other inputs and/or the model, such as parameters related to the generation length. + + Arg: + is_init (`bool`, *optional*, defaults to `False`): + Whether the validation is performed during the initialization of the instance. + """ + + # Validation of individual attributes + if self.early_stopping not in {True, False, "never"}: + raise ValueError(f"`early_stopping` must be a boolean or 'never', but is {self.early_stopping}.") + if self.max_new_tokens is not None and self.max_new_tokens <= 0: + raise ValueError(f"`max_new_tokens` must be greater than 0, but is {self.max_new_tokens}.") + + # Validation of attribute relations: + fix_location = "" + if is_init: + fix_location = ( + " This was detected when initializing the generation config instance, which means the corresponding " + "file may hold incorrect parameterization and should be fixed." + ) + + # 1. detect sampling-only parameterization when not in sampling mode + if self.do_sample is False: + greedy_wrong_parameter_msg = ( + "`do_sample` is set to `False`. However, `{flag_name}` is set to `{flag_value}` -- this flag is only " + "used in sample-based generation modes. You should set `do_sample=True` or unset `{flag_name}`." 
+ + fix_location + ) + if self.temperature is not None and self.temperature != 1.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="temperature", flag_value=self.temperature), + UserWarning, + ) + if self.top_p is not None and self.top_p != 1.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="top_p", flag_value=self.top_p), + UserWarning, + ) + if self.typical_p is not None and self.typical_p != 1.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="typical_p", flag_value=self.typical_p), + UserWarning, + ) + if ( + self.top_k is not None and self.top_k != 50 and self.penalty_alpha is None + ): # contrastive search uses top_k + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="top_k", flag_value=self.top_k), + UserWarning, + ) + if self.epsilon_cutoff is not None and self.epsilon_cutoff != 0.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="epsilon_cutoff", flag_value=self.epsilon_cutoff), + UserWarning, + ) + if self.eta_cutoff is not None and self.eta_cutoff != 0.0: + warnings.warn( + greedy_wrong_parameter_msg.format(flag_name="eta_cutoff", flag_value=self.eta_cutoff), + UserWarning, + ) + + # 2. detect beam-only parameterization when not in beam mode + if self.num_beams is None: + warnings.warn("`num_beams` is set to None - defaulting to 1.", UserWarning) + self.num_beams = 1 + + if self.num_beams == 1: + single_beam_wrong_parameter_msg = ( + "`num_beams` is set to 1. However, `{flag_name}` is set to `{flag_value}` -- this flag is only used " + "in beam-based generation modes. You should set `num_beams>1` or unset `{flag_name}`." + fix_location + ) + if self.early_stopping is not False: + warnings.warn( + single_beam_wrong_parameter_msg.format(flag_name="early_stopping", flag_value=self.early_stopping), + UserWarning, + ) + if self.num_beam_groups is not None and self.num_beam_groups != 1: + warnings.warn( + single_beam_wrong_parameter_msg.format( + flag_name="num_beam_groups", flag_value=self.num_beam_groups + ), + UserWarning, + ) + if self.diversity_penalty is not None and self.diversity_penalty != 0.0: + warnings.warn( + single_beam_wrong_parameter_msg.format( + flag_name="diversity_penalty", flag_value=self.diversity_penalty + ), + UserWarning, + ) + if self.length_penalty is not None and self.length_penalty != 1.0: + warnings.warn( + single_beam_wrong_parameter_msg.format(flag_name="length_penalty", flag_value=self.length_penalty), + UserWarning, + ) + if self.constraints is not None: + warnings.warn( + single_beam_wrong_parameter_msg.format(flag_name="constraints", flag_value=self.constraints), + UserWarning, + ) + + # 3. detect incorrect paramaterization specific to advanced beam modes + else: + # constrained beam search + if self.constraints is not None or self.force_words_ids is not None: + constrained_wrong_parameter_msg = ( + "one of `constraints`, `force_words_ids` is not `None`, triggering constrained beam search. However, " + "`{flag_name}` is set to `{flag_value}`, which is incompatible with this generation mode. Set " + "`constraints` and `force_words_ids` to `None` or unset `{flag_name}` to continue." 
+ fix_location + ) + if self.do_sample is True: + raise ValueError( + constrained_wrong_parameter_msg.format(flag_name="do_sample", flag_value=self.do_sample) + ) + if self.num_beam_groups is not None and self.num_beam_groups != 1: + raise ValueError( + constrained_wrong_parameter_msg.format( + flag_name="num_beam_groups", flag_value=self.num_beam_groups + ) + ) + # group beam search + if self.diversity_penalty != 0.0 or self.num_beam_groups != 1: + group_error_prefix = ( + "`diversity_penalty` is not 0.0 or `num_beam_groups` is not 1, triggering group beam search. In " + "this generation mode, " + ) + if self.do_sample is True: + raise ValueError(group_error_prefix + "`do_sample` must be set to `False`") + if self.num_beams % self.num_beam_groups != 0: + raise ValueError(group_error_prefix + "`num_beams` should be divisible by `num_beam_groups`") + if self.diversity_penalty == 0.0: + raise ValueError( + group_error_prefix + + "`diversity_penalty` should be greater than `0.0`, otherwise your groups will be identical." + ) + + # 4. check `num_return_sequences` + if self.num_return_sequences != 1: + if self.num_beams == 1: + if self.do_sample is False: + raise ValueError( + "Greedy methods without beam search do not support `num_return_sequences` different than 1 " + f"(got {self.num_return_sequences})." + ) + elif self.num_return_sequences > self.num_beams: + raise ValueError( + f"`num_return_sequences` ({self.num_return_sequences}) has to be smaller or equal to `num_beams` " + f"({self.num_beams})." + ) + + # 5. check common issue: passing `generate` arguments inside the generation config + generate_arguments = ( + "logits_processor", + "stopping_criteria", + "prefix_allowed_tokens_fn", + "synced_gpus", + "assistant_model", + "streamer", + "negative_prompt_ids", + "negative_prompt_attention_mask", + ) + for arg in generate_arguments: + if hasattr(self, arg): + raise ValueError( + f"Argument `{arg}` is not a valid argument of `GenerationConfig`. It should be passed to " + "`generate()` (or a pipeline) directly." + ) + + def save_pretrained( + self, + save_directory: Union[str, os.PathLike], + config_file_name: Optional[Union[str, os.PathLike]] = None, + push_to_hub: bool = False, + **kwargs, + ): + r""" + Save a generation configuration object to the directory `save_directory`, so that it can be re-loaded using the + [`~GenerationConfig.from_pretrained`] class method. + + Args: + save_directory (`str` or `os.PathLike`): + Directory where the configuration JSON file will be saved (will be created if it does not exist). + config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): + Name of the generation configuration JSON file to be saved in `save_directory`. + push_to_hub (`bool`, *optional*, defaults to `False`): + Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the + repository you want to push to with `repo_id` (will default to the name of `save_directory` in your + namespace). + kwargs (`Dict[str, Any]`, *optional*): + Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method. + """ + + # At save time, validate the instance -- if any warning/exception is thrown, we refuse to save the instance. + # This strictness is enforced to prevent bad configurations from being saved and re-used. 
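+        # `warnings.catch_warnings(record=True)` collects the warnings emitted by `validate()` instead of printing them;
+        # any recorded warning (or a `ValueError` raised by `validate()` itself) is re-raised below as a `ValueError`
+        # explaining that the configuration must be fixed before it can be saved.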
+ try: + with warnings.catch_warnings(record=True) as caught_warnings: + self.validate() + if len(caught_warnings) > 0: + raise ValueError(str([w.message for w in caught_warnings])) + except ValueError as exc: + raise ValueError( + "The generation config instance is invalid -- `.validate()` throws warnings and/or exceptions. " + "Fix these issues to save the configuration.\n\nThrown during validation:\n" + str(exc) + ) + + use_auth_token = kwargs.pop("use_auth_token", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", + FutureWarning, + ) + if kwargs.get("token", None) is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + kwargs["token"] = use_auth_token + + config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME + + if os.path.isfile(save_directory): + raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file") + + os.makedirs(save_directory, exist_ok=True) + + if push_to_hub: + commit_message = kwargs.pop("commit_message", None) + repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1]) + repo_id = self._create_repo(repo_id, **kwargs) + files_timestamps = self._get_files_timestamps(save_directory) + + output_config_file = os.path.join(save_directory, config_file_name) + + self.to_json_file(output_config_file, use_diff=True) + logger.info(f"Configuration saved in {output_config_file}") + + if push_to_hub: + self._upload_modified_files( + save_directory, + repo_id, + files_timestamps, + commit_message=commit_message, + token=kwargs.get("token"), + ) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name: Union[str, os.PathLike], + config_file_name: Optional[Union[str, os.PathLike]] = None, + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + **kwargs, + ) -> "GenerationConfig": + r""" + Instantiate a [`GenerationConfig`] from a generation configuration file. + + Args: + pretrained_model_name (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained model configuration hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a configuration file saved using the + [`~GenerationConfig.save_pretrained`] method, e.g., `./my_model_directory/`. + config_file_name (`str` or `os.PathLike`, *optional*, defaults to `"generation_config.json"`): + Name of the generation configuration JSON file to be loaded from `pretrained_model_name`. + cache_dir (`str` or `os.PathLike`, *optional*): + Path to a directory in which a downloaded pretrained model configuration should be cached if the + standard cache should not be used. + force_download (`bool`, *optional*, defaults to `False`): + Whether or not to force to (re-)download the configuration files and override the cached versions if + they exist. + resume_download (`bool`, *optional*, defaults to `False`): + Whether or not to delete incompletely received file. Attempts to resume the download if such a file + exists. + proxies (`Dict[str, str]`, *optional*): + A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', + 'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request. 
+ token (`str` or `bool`, *optional*): + The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use + the token generated when running `huggingface-cli login` (stored in `~/.huggingface`). + revision (`str`, *optional*, defaults to `"main"`): + The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a + git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any + identifier allowed by git. + + + + To test a pull request you made on the Hub, you can pass `revision="refs/pr/". + + + + return_unused_kwargs (`bool`, *optional*, defaults to `False`): + If `False`, then this function returns just the final configuration object. + + If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a + dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the + part of `kwargs` which has not been used to update `config` and is otherwise ignored. + subfolder (`str`, *optional*, defaults to `""`): + In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can + specify the folder name here. + kwargs (`Dict[str, Any]`, *optional*): + The values in kwargs of any keys which are configuration attributes will be used to override the loaded + values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled + by the `return_unused_kwargs` keyword parameter. + + Returns: + [`GenerationConfig`]: The configuration object instantiated from this pretrained model. + + Examples: + + ```python + >>> from transformers import GenerationConfig + + >>> # Download configuration from huggingface.co and cache. + >>> generation_config = GenerationConfig.from_pretrained("openai-community/gpt2") + + >>> # E.g. config was saved using *save_pretrained('./test/saved_model/')* + >>> generation_config.save_pretrained("./test/saved_model/") + >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/") + + >>> # You can also specify configuration names to your generation configuration file + >>> generation_config.save_pretrained("./test/saved_model/", config_file_name="my_configuration.json") + >>> generation_config = GenerationConfig.from_pretrained("./test/saved_model/", "my_configuration.json") + + >>> # If you'd like to try a minor variation to an existing configuration, you can also pass generation + >>> # arguments to `.from_pretrained()`. Be mindful that typos and unused arguments will be ignored + >>> generation_config, unused_kwargs = GenerationConfig.from_pretrained( + ... "openai-community/gpt2", top_k=1, foo=False, do_sample=True, return_unused_kwargs=True + ... ) + >>> generation_config.top_k + 1 + + >>> unused_kwargs + {'foo': False} + ```""" + config_file_name = config_file_name if config_file_name is not None else GENERATION_CONFIG_NAME + + resume_download = kwargs.pop("resume_download", False) + proxies = kwargs.pop("proxies", None) + use_auth_token = kwargs.pop("use_auth_token", None) + subfolder = kwargs.pop("subfolder", "") + from_pipeline = kwargs.pop("_from_pipeline", None) + from_auto_class = kwargs.pop("_from_auto", False) + commit_hash = kwargs.pop("_commit_hash", None) + + if use_auth_token is not None: + warnings.warn( + "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. 
Please use `token` instead.", + FutureWarning, + ) + if token is not None: + raise ValueError( + "`token` and `use_auth_token` are both specified. Please set only the argument `token`." + ) + token = use_auth_token + + user_agent = {"file_type": "config", "from_auto_class": from_auto_class} + if from_pipeline is not None: + user_agent["using_pipeline"] = from_pipeline + + config_path = os.path.join(pretrained_model_name, config_file_name) + config_path = str(config_path) + + is_local = os.path.exists(config_path) + if os.path.isfile(os.path.join(subfolder, config_path)): + # Special case when config_path is a local file + resolved_config_file = config_path + is_local = True + elif is_remote_url(config_path): + configuration_file = config_path + resolved_config_file = download_url(config_path) + else: + configuration_file = config_file_name + try: + # Load from local folder or from cache or download from model Hub and cache + resolved_config_file = cached_file( + pretrained_model_name, + configuration_file, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + local_files_only=local_files_only, + token=token, + user_agent=user_agent, + revision=revision, + subfolder=subfolder, + _commit_hash=commit_hash, + ) + commit_hash = extract_commit_hash(resolved_config_file, commit_hash) + except EnvironmentError: + # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to + # the original exception. + raise + except Exception: + # For any other exception, we throw a generic error. + raise EnvironmentError( + f"Can't load the configuration of '{pretrained_model_name}'. If you were trying to load it" + " from 'https://huggingface.co/models', make sure you don't have a local directory with the same" + f" name. Otherwise, make sure '{pretrained_model_name}' is the correct path to a directory" + f" containing a {configuration_file} file" + ) + + try: + # Load config dict + config_dict = cls._dict_from_json_file(resolved_config_file) + config_dict["_commit_hash"] = commit_hash + except (json.JSONDecodeError, UnicodeDecodeError): + raise EnvironmentError( + f"It looks like the config file at '{resolved_config_file}' is not a valid JSON file." + ) + + if is_local: + logger.info(f"loading configuration file {resolved_config_file}") + else: + logger.info(f"loading configuration file {configuration_file} from cache at {resolved_config_file}") + + if kwargs.get("return_unused_kwargs") is True: + config, unused_kwargs = cls.from_dict(config_dict, **kwargs) + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config, unused_kwargs + else: + config = cls.from_dict(config_dict, **kwargs) + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config + + @classmethod + def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]): + with open(json_file, "r", encoding="utf-8") as reader: + text = reader.read() + return json.loads(text) + + @classmethod + def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "GenerationConfig": + """ + Instantiates a [`GenerationConfig`] from a Python dictionary of parameters. + + Args: + config_dict (`Dict[str, Any]`): + Dictionary that will be used to instantiate the configuration object. + kwargs (`Dict[str, Any]`): + Additional parameters from which to initialize the configuration object. 
+ + Returns: + [`GenerationConfig`]: The configuration object instantiated from those parameters. + """ + return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) + # Those arguments may be passed along for our internal telemetry. + # We remove them so they don't appear in `return_unused_kwargs`. + kwargs.pop("_from_auto", None) + kwargs.pop("_from_pipeline", None) + # The commit hash might have been updated in the `config_dict`, we don't want the kwargs to erase that update. + if "_commit_hash" in kwargs and "_commit_hash" in config_dict: + kwargs["_commit_hash"] = config_dict["_commit_hash"] + + # The line below allows model-specific config to be loaded as well through kwargs, with safety checks. + # See https://github.com/huggingface/transformers/pull/21269 + config = cls(**{**config_dict, **kwargs}) + unused_kwargs = config.update(**kwargs) + + logger.info(f"Generate config {config}") + if return_unused_kwargs: + return config, unused_kwargs + else: + return config + + def dict_torch_dtype_to_str(self, d: Dict[str, Any]) -> None: + """ + Checks whether the passed dictionary and its nested dicts have a *torch_dtype* key and if it's not None, + converts torch.dtype to a string of just the type. For example, `torch.float32` get converted into *"float32"* + string, which can then be stored in the json format. + """ + if d.get("torch_dtype", None) is not None and not isinstance(d["torch_dtype"], str): + d["torch_dtype"] = str(d["torch_dtype"]).split(".")[1] + for value in d.values(): + if isinstance(value, dict): + self.dict_torch_dtype_to_str(value) + + def to_diff_dict(self) -> Dict[str, Any]: + """ + Removes all attributes from config which correspond to the default config attributes for better readability and + serializes to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance, + """ + config_dict = self.to_dict() + + # get the default config dict + default_config_dict = GenerationConfig().to_dict() + + serializable_config_dict = {} + + # only serialize values that differ from the default config + for key, value in config_dict.items(): + if key not in default_config_dict or key == "transformers_version" or value != default_config_dict[key]: + serializable_config_dict[key] = value + + self.dict_torch_dtype_to_str(serializable_config_dict) + return serializable_config_dict + + def to_dict(self) -> Dict[str, Any]: + """ + Serializes this instance to a Python dictionary. + + Returns: + `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance. + """ + output = copy.deepcopy(self.__dict__) + + # Fields to ignore at serialization time + if "_commit_hash" in output: + del output["_commit_hash"] + if "_original_object_hash" in output: + del output["_original_object_hash"] + + # Transformers version when serializing this file + output["transformers_version"] = __version__ + + self.dict_torch_dtype_to_str(output) + return output + + def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str: + """ + Serializes this instance to a JSON string. + + Args: + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `GenerationConfig()` + is serialized to JSON string. 
+ ignore_metadata (`bool`, *optional*, defaults to `False`): + Whether to ignore the metadata fields present in the instance + + Returns: + `str`: String containing all the attributes that make up this configuration instance in JSON format. + """ + if use_diff is True: + config_dict = self.to_diff_dict() + else: + config_dict = self.to_dict() + + if ignore_metadata: + for metadata_field in METADATA_FIELDS: + config_dict.pop(metadata_field, None) + + def convert_keys_to_string(obj): + if isinstance(obj, dict): + return {str(key): convert_keys_to_string(value) for key, value in obj.items()} + elif isinstance(obj, list): + return [convert_keys_to_string(item) for item in obj] + else: + return obj + + config_dict = convert_keys_to_string(config_dict) + + return json.dumps(config_dict, indent=2, sort_keys=True) + "\n" + + def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True): + """ + Save this instance to a JSON file. + + Args: + json_file_path (`str` or `os.PathLike`): + Path to the JSON file in which this configuration instance's parameters will be saved. + use_diff (`bool`, *optional*, defaults to `True`): + If set to `True`, only the difference between the config instance and the default `GenerationConfig()` + is serialized to JSON file. + """ + with open(json_file_path, "w", encoding="utf-8") as writer: + writer.write(self.to_json_string(use_diff=use_diff)) + + @classmethod + def from_model_config(cls, model_config: PretrainedConfig) -> "GenerationConfig": + """ + Instantiates a [`GenerationConfig`] from a [`PretrainedConfig`]. This function is useful to convert legacy + [`PretrainedConfig`] objects, which may contain generation parameters, into a stand-alone [`GenerationConfig`]. + + Args: + model_config (`PretrainedConfig`): + The model config that will be used to instantiate the generation config. + + Returns: + [`GenerationConfig`]: The configuration object instantiated from those parameters. + """ + config_dict = model_config.to_dict() + config_dict.pop("_from_model_config", None) + config = cls.from_dict(config_dict, return_unused_kwargs=False, _from_model_config=True) + + # Special case: some models have generation attributes set in the decoder. Use them if still unset in the + # generation config. + for decoder_name in ("decoder", "generator", "text_config"): + if decoder_name in config_dict: + default_generation_config = GenerationConfig() + decoder_config = config_dict[decoder_name] + for attr in config.to_dict().keys(): + if attr in decoder_config and getattr(config, attr) == getattr(default_generation_config, attr): + setattr(config, attr, decoder_config[attr]) + + config._original_object_hash = hash(config) # Hash to detect whether the instance was modified + return config + + def update(self, **kwargs): + """ + Updates attributes of this class instance with attributes from `kwargs` if they match existing atributtes, + returning all the unused kwargs. + + Args: + kwargs (`Dict[str, Any]`): + Dictionary of attributes to tentatively update this class. + + Returns: + `Dict[str, Any]`: Dictionary containing all the key-value pairs that were not used to update the instance. 
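        A doctest-style sketch of the `update` behavior described above (illustrative only, with hypothetical values; `GenerationConfig` is the class defined in this file):

        ```python
        >>> from transformers import GenerationConfig

        >>> generation_config = GenerationConfig(do_sample=True)
        >>> unused_kwargs = generation_config.update(temperature=0.7, foo="bar")
        >>> generation_config.temperature
        0.7
        >>> unused_kwargs
        {'foo': 'bar'}
        ```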
+ """ + to_remove = [] + for key, value in kwargs.items(): + if hasattr(self, key): + setattr(self, key, value) + to_remove.append(key) + + # Confirm that the updated instance is still valid + self.validate() + + # Remove all the attributes that were updated, without modifying the input dict + unused_kwargs = {key: value for key, value in kwargs.items() if key not in to_remove} + return unused_kwargs diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py new file mode 100644 index 0000000000000000000000000000000000000000..84b5a38d5de4da99eb513e6a8a6859342bc2fcba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_logits_process.py @@ -0,0 +1,544 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect + +import jax +import jax.lax as lax +import jax.numpy as jnp +from jax.experimental import sparse + +from ..utils import add_start_docstrings +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" + Args: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam + search or log softmax for each vocabulary token when using beam search + kwargs (`Dict[str, Any]`, *optional*): + Additional logits processor specific kwargs. + + Return: + `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. + +""" + + +class FlaxLogitsProcessor: + """Abstract base class for all logit processors that can be applied during generation.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: + """Flax method for processing logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class FlaxLogitsWarper: + """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray) -> jnp.ndarray: + """Flax method for warping logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." 
+ ) + + +class FlaxLogitsProcessorList(list): + """ + This class can be used to create a list of [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to subsequently process + a `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each + [`FlaxLogitsProcessor`] or [`FlaxLogitsWarper`] to the inputs. + """ + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int, **kwargs) -> jnp.ndarray: + for processor in self: + function_args = inspect.signature(processor.__call__).parameters + if len(function_args) > 3: + if not all(arg in kwargs for arg in list(function_args.keys())[2:]): + raise ValueError( + f"Make sure that all the required parameters: {list(function_args.keys())} for " + f"{processor.__class__} are passed to the logits processor." + ) + scores = processor(input_ids, scores, cur_len, **kwargs) + else: + scores = processor(input_ids, scores, cur_len) + return scores + + +class FlaxTemperatureLogitsWarper(FlaxLogitsWarper): + r""" + [`FlaxLogitsWarper`] for temperature (exponential scaling output probability distribution). + + Args: + temperature (`float`): + The value used to module the logits distribution. + """ + + def __init__(self, temperature: float): + if not isinstance(temperature, float) or not (temperature > 0): + raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") + + self.temperature = temperature + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + scores = scores / self.temperature + return scores + + +class FlaxTopPLogitsWarper(FlaxLogitsWarper): + """ + [`FlaxLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. + + Args: + top_p (`float`): + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. 
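    A minimal sketch of how this top-p warper is applied, assuming a toy 1x4 batch of logits (hypothetical values; the warper ignores `input_ids`):

    ```python
    import jax.numpy as jnp

    from transformers.generation.flax_logits_process import FlaxTopPLogitsWarper

    warper = FlaxTopPLogitsWarper(top_p=0.9, min_tokens_to_keep=1)
    scores = jnp.array([[0.0, 1.0, 2.0, 3.0]])
    # Tokens outside the smallest set whose cumulative probability reaches `top_p`
    # are set to `filter_value` (-inf); the remaining tokens keep their original logits.
    filtered = warper(input_ids=None, scores=scores, cur_len=1)
    ```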
+ """ + + def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): + raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") + if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): + raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") + + self.top_p = top_p + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1]) + + mask_scores = jnp.full_like(scores, self.filter_value) + cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1) + score_mask = cumulative_probs < self.top_p + + # include the token that is higher than top_p as well + score_mask = jnp.roll(score_mask, 1) + score_mask |= score_mask.at[:, 0].set(True) + + # min tokens to keep + score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True) + + topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores) + next_scores = jax.lax.sort_key_val(topk_indices, topk_next_scores)[-1] + + return next_scores + + +class FlaxTopKLogitsWarper(FlaxLogitsWarper): + r""" + [`FlaxLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. + + Args: + top_k (`int`): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + """ + + def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") + + self.top_k = max(top_k, min_tokens_to_keep) + self.filter_value = filter_value + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + batch_size, vocab_size = scores.shape + next_scores_flat = jnp.full(batch_size * vocab_size, self.filter_value) + + topk = min(self.top_k, scores.shape[-1]) # Safety check + topk_scores, topk_indices = lax.top_k(scores, topk) + shift = jnp.broadcast_to((jnp.arange(batch_size) * vocab_size)[:, None], (batch_size, topk)).flatten() + topk_scores_flat = topk_scores.flatten() + topk_indices_flat = topk_indices.flatten() + shift + + next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat) + next_scores = next_scores_flat.reshape(batch_size, vocab_size) + return next_scores + + +class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] that enforces the specified token as the first generated token. + + Args: + bos_token_id (`int`): + The id of the token to force as the first generated token. 
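    A small illustration of the forcing behavior, with a hypothetical vocabulary of 5 tokens and `bos_token_id=0` (the processor ignores `input_ids`):

    ```python
    import jax.numpy as jnp

    from transformers.generation.flax_logits_process import FlaxForcedBOSTokenLogitsProcessor

    processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=0)
    scores = jnp.zeros((1, 5))
    # At cur_len == 1 (the first generated position) every id except `bos_token_id`
    # is pushed to -inf, so the BOS token is guaranteed to be picked.
    forced = processor(input_ids=None, scores=scores, cur_len=1)
    ```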
+ """ + + def __init__(self, bos_token_id: int): + self.bos_token_id = bos_token_id + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + new_scores = jnp.full(scores.shape, -float("inf")) + + apply_penalty = 1 - jnp.bool_(cur_len - 1) + + scores = jnp.where(apply_penalty, new_scores.at[:, self.bos_token_id].set(0), scores) + + return scores + + +class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. + + Args: + max_length (`int`): + The maximum length of the sequence to be generated. + eos_token_id (`int`): + The id of the token to force as the last generated token when `max_length` is reached. + """ + + def __init__(self, max_length: int, eos_token_id: int): + self.max_length = max_length + self.eos_token_id = eos_token_id + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + new_scores = jnp.full(scores.shape, -float("inf")) + + apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1) + + scores = jnp.where(apply_penalty, new_scores.at[:, self.eos_token_id].set(0), scores) + + return scores + + +class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. + + Args: + min_length (`int`): + The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. + eos_token_id (`int`): + The id of the *end-of-sequence* token. + """ + + def __init__(self, min_length: int, eos_token_id: int): + if not isinstance(min_length, int) or min_length < 0: + raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") + + if not isinstance(eos_token_id, int) or eos_token_id < 0: + raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") + + self.min_length = min_length + self.eos_token_id = eos_token_id + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + # create boolean flag to decide if min length penalty should be applied + apply_penalty = 1 - jnp.clip(cur_len - self.min_length, 0, 1) + + scores = jnp.where(apply_penalty, scores.at[:, self.eos_token_id].set(-float("inf")), scores) + + return scores + + +class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] supressing a list of tokens as soon as the `generate` function starts generating using + `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are not sampled at the + begining of the generation. + + Args: + begin_suppress_tokens (`List[int]`): + Tokens to not sample. + begin_index (`int`): + Index where the tokens are suppressed. + """ + + def __init__(self, begin_suppress_tokens, begin_index): + self.begin_suppress_tokens = list(begin_suppress_tokens) + self.begin_index = begin_index + + def __call__(self, input_ids, scores, cur_len: int): + apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index) + + scores = jnp.where(apply_penalty, scores.at[:, self.begin_suppress_tokens].set(-float("inf")), scores) + + return scores + + +class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] suppressing a list of tokens at each decoding step. The processor will set their log probs + to be `-inf` so they are not sampled. + + Args: + suppress_tokens (`list`): + Tokens to not sample. 
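    A short sketch of the suppression, using hypothetical token ids 1 and 3 over a toy vocabulary of 5:

    ```python
    import jax.numpy as jnp

    from transformers.generation.flax_logits_process import FlaxSuppressTokensLogitsProcessor

    processor = FlaxSuppressTokensLogitsProcessor(suppress_tokens=[1, 3])
    scores = jnp.zeros((2, 5))
    # Columns 1 and 3 become -inf for every batch item, so those ids can never be sampled.
    suppressed = processor(input_ids=None, scores=scores, cur_len=4)
    ```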
+ """ + + def __init__(self, suppress_tokens: list): + self.suppress_tokens = list(suppress_tokens) + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + scores = scores.at[..., self.suppress_tokens].set(-float("inf")) + + return scores + + +class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] that takes a list of pairs of integers which indicates a mapping from generation indices to + token indices that will be forced before sampling. The processor will set their log probs to 0 and all other tokens + to `-inf` so that they are sampled at their corresponding index. + + Args: + force_token_map (`list`): + Map giving token ids and indices where they will be forced to be sampled. + """ + + def __init__(self, force_token_map): + force_token_map = dict(force_token_map) + # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the + # index of the array corresponds to the index of the token to be forced, for XLA compatibility. + # Indexes without forced tokens will have a negative value. + force_token_array = jnp.ones((max(force_token_map.keys()) + 1), dtype=jnp.int32) * -1 + for index, token in force_token_map.items(): + if token is not None: + force_token_array = force_token_array.at[index].set(token) + self.force_token_array = jnp.int32(force_token_array) + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + def _force_token(generation_idx): + batch_size = scores.shape[0] + current_token = self.force_token_array[generation_idx] + + new_scores = jnp.ones_like(scores, dtype=scores.dtype) * -float("inf") + updates = jnp.zeros((batch_size, 1), dtype=scores.dtype) + new_scores = lax.dynamic_update_slice(new_scores, updates, (0, current_token)) + return new_scores + + scores = lax.cond( + cur_len >= self.force_token_array.shape[0], + # If the current length is geq than the length of force_token_array, the processor does nothing. + lambda: scores, + # Otherwise, it may force a certain token. + lambda: lax.cond( + self.force_token_array[cur_len] >= 0, + # Only valid (positive) tokens are forced + lambda: _force_token(cur_len), + # Otherwise, the processor does nothing. + lambda: scores, + ), + ) + return scores + + +class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor): + r""" + Whisper specific Processor. This processor can be used to force a list of tokens. The processor will set their log + probs to `inf` so that they are sampled at their corresponding index. + + Args: + generate_config (`GenerateConfig`): + The generate config used to generate the output. The following parameters are required: + eos_token_id (`int`, *optional*, defaults to 50257): + The id of the *end-of-sequence* token. + no_timestamps_token_id (`int`, *optional*, defaults to 50363): + The id of the `"<|notimestamps|>"` token. + max_initial_timestamp_index (`int`, *optional*, defaults to 1): + Used to set the maximum value of the initial timestamp. This is used to prevent the model from + predicting timestamps that are too far in the future. 
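    For reference, a sketch of the `force_token_map` format consumed by `FlaxForceTokensLogitsProcessor` above, using hypothetical token ids and a toy vocabulary of 12:

    ```python
    import jax.numpy as jnp

    from transformers.generation.flax_logits_process import FlaxForceTokensLogitsProcessor

    # Force token 7 at generation index 1 and token 9 at index 2 (hypothetical ids).
    processor = FlaxForceTokensLogitsProcessor(force_token_map=[[1, 7], [2, 9]])
    scores = jnp.zeros((1, 12))
    # At cur_len == 1 every column is -inf except column 7, which is set to 0,
    # so token 7 is the only possible choice at that step.
    forced = processor(input_ids=None, scores=scores, cur_len=1)
    ```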
+ """ + + def __init__(self, generate_config, model_config, decoder_input_length): + self.eos_token_id = generate_config.eos_token_id + self.no_timestamps_token_id = generate_config.no_timestamps_token_id + self.timestamp_begin = generate_config.no_timestamps_token_id + 1 + + self.begin_index = decoder_input_length + 1 + + if generate_config.is_multilingual: + # room for language token and task token + self.begin_index += 2 + if hasattr(generate_config, "max_initial_timestamp_index"): + self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index + else: + self.max_initial_timestamp_index = model_config.vocab_size + if self.max_initial_timestamp_index is None: + self.max_initial_timestamp_index = model_config.vocab_size + + def __call__(self, input_ids, scores, cur_len): + # suppress <|notimestamps|> which is handled by without_timestamps + scores = scores.at[:, self.no_timestamps_token_id].set(-float("inf")) + + def handle_pairs(input_ids_k, scores_k): + last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1, True, False) + last_was_timestamp = jnp.where( + input_ids_k[cur_len - 1] >= self.timestamp_begin, + True and last_was_timestamp, + False, + ) + + penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2, True, False) + penultimate_was_timestamp = jnp.where( + input_ids_k[cur_len - 2] >= self.timestamp_begin, + True, + penultimate_was_timestamp, + ) + + return jnp.where( + last_was_timestamp, + jnp.where( + penultimate_was_timestamp > 0, + scores_k.at[self.timestamp_begin :].set(-float("inf")), + scores_k.at[: self.eos_token_id].set(-float("inf")), + ), + scores_k, + ) + + scores = jax.vmap(handle_pairs)(input_ids, scores) + + apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index, True, False) + apply_max_initial_timestamp = jnp.where( + self.max_initial_timestamp_index is not None, + True and apply_max_initial_timestamp, + False, + ) + + last_allowed = self.timestamp_begin + self.max_initial_timestamp_index + + scores = jnp.where( + apply_max_initial_timestamp, + scores.at[:, last_allowed + 1 :].set(-float("inf")), + scores, + ) + + # if sum of probability over timestamps is above any other token, sample timestamp + logprobs = jax.nn.log_softmax(scores, axis=-1) + + def handle_cumulative_probs(logprobs_k, scores_k): + timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :], axis=-1) + max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin]) + return jnp.where( + timestamp_logprob > max_text_token_logprob, + scores_k.at[: self.timestamp_begin].set(-float("inf")), + scores_k, + ) + + scores = jax.vmap(handle_cumulative_probs)(logprobs, scores) + + return scores + + +class FlaxNoRepeatNGramLogitsProcessor(FlaxLogitsProcessor): + r""" + [`FlaxLogitsProcessor`] that enforces no repetition of n-grams. See + [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). + + Args: + ngram_size (`int`): + All ngrams of size `ngram_size` can only occur once. + """ + + def __init__(self, ngram_size: int): + if not isinstance(ngram_size, int) or ngram_size <= 0: + raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") + self.ngram_size = ngram_size + + def get_previous_ngrams(self, input_ids: jnp.ndarray, vocab_size: int, cur_len: int): + """ + get a matrix of size (batch_size,) + (vocab_size,)*n (for n-grams) that + represent the n-grams that occured previously. 
+ The BCOO representation allow to store only the few non-zero entries, instead of the full (huge) matrix + """ + batch_size, seq_len = input_ids.shape + # number of n-grams in the whole sequence + seq_ngrams = seq_len - (self.ngram_size - 1) + # number of n-grams in the currently generated sequence + cur_ngrams = cur_len - (self.ngram_size - 1) + + def body_fun(i, val): + b = i % batch_size + pos = i // batch_size + return val.at[i].set( + jnp.array( + [ + b, + ] + + [jnp.array(input_ids)[b, pos + j] for j in range(self.ngram_size)] + ) + ) + + shape = (batch_size * seq_ngrams, self.ngram_size + 1) + all_update_indices = jax.lax.fori_loop( + 0, batch_size * cur_ngrams, body_fun, jnp.zeros(shape, dtype=input_ids.dtype) + ) + + # ignore the n-grams not yet generated + data = (jnp.arange(batch_size * seq_ngrams) < batch_size * cur_ngrams).astype("float32") + + return sparse.BCOO((data, all_update_indices), shape=(batch_size,) + (vocab_size,) * self.ngram_size) + + def get_banned_tokens_mask(self, latest_tokens: jnp.ndarray, previous_ngrams) -> jnp.ndarray: + """ + Determines which tokens must be banned given latest tokens and the previously seen + ngrams. + """ + + @sparse.sparsify + @jax.vmap + def inner_fn(latest_tokens, previous_ngrams): + return previous_ngrams[tuple(latest_tokens)] + + return sparse.bcoo_todense(inner_fn(latest_tokens, previous_ngrams)) + + def __call__(self, input_ids: jnp.ndarray, scores: jnp.ndarray, cur_len: int) -> jnp.ndarray: + def true_fn(): + _, vocab_size = scores.shape + # store the previously seen n-grams + previous_ngrams = self.get_previous_ngrams(input_ids, vocab_size, cur_len) + + # get the n-1 last tokens that prefix the n-gram being generated + latest_tokens = jnp.zeros((input_ids.shape[0], self.ngram_size - 1), dtype=input_ids.dtype) + latest_tokens = jax.lax.dynamic_update_slice( + latest_tokens, + jax.lax.dynamic_slice( + input_ids, (0, cur_len - (self.ngram_size - 1)), (input_ids.shape[0], (self.ngram_size - 1)) + ), + (0, 0), + ) + + # compute the banned tokens, ie all the tokens that when added to the latest tokens lead to a n-gram that was previously generated + banned_tokens_indices_mask = self.get_banned_tokens_mask(latest_tokens, previous_ngrams).astype("bool") + return jnp.where(banned_tokens_indices_mask, -float("inf"), scores) + + output = jax.lax.cond((cur_len >= self.ngram_size - 1), true_fn, lambda: scores) + return output diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..08480ac983e8055e60fc292d5d6d2dc1e6f6597f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/flax_utils.py @@ -0,0 +1,1022 @@ +# coding=utf-8 +# Copyright 2021 The Google AI Flax Team Authors, and The HuggingFace Inc. team. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
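Before moving on to the generation utilities below, a small sketch of the n-gram ban implemented by `FlaxNoRepeatNGramLogitsProcessor` at the end of `flax_logits_process.py` above (toy vocabulary of 8, hypothetical prompt):

```python
import jax.numpy as jnp

from transformers.generation.flax_logits_process import FlaxNoRepeatNGramLogitsProcessor

processor = FlaxNoRepeatNGramLogitsProcessor(ngram_size=2)
# The bigram (5, 6) already occurs in the prompt and the last token is 5 again,
# so token 6 is banned (set to -inf) for the next step.
input_ids = jnp.array([[5, 6, 5]])
scores = jnp.zeros((1, 8))
banned = processor(input_ids, scores, cur_len=3)
```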
+ + +import copy +import inspect +import warnings +from functools import partial +from typing import Any, Dict, Optional, Union + +import flax +import jax +import jax.numpy as jnp +import numpy as np +from jax import lax + +from ..models.auto import ( + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, +) +from ..utils import ModelOutput, logging +from .configuration_utils import GenerationConfig +from .flax_logits_process import ( + FlaxForcedBOSTokenLogitsProcessor, + FlaxForcedEOSTokenLogitsProcessor, + FlaxForceTokensLogitsProcessor, + FlaxLogitsProcessorList, + FlaxMinLengthLogitsProcessor, + FlaxNoRepeatNGramLogitsProcessor, + FlaxSuppressTokensAtBeginLogitsProcessor, + FlaxSuppressTokensLogitsProcessor, + FlaxTemperatureLogitsWarper, + FlaxTopKLogitsWarper, + FlaxTopPLogitsWarper, +) + + +logger = logging.get_logger(__name__) + + +@flax.struct.dataclass +class FlaxGreedySearchOutput(ModelOutput): + """ + Flax Base class for outputs of decoder-only generation models using greedy search. + + + Args: + sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): + The generated sequences. + """ + + sequences: jnp.ndarray = None + + +@flax.struct.dataclass +class FlaxSampleOutput(ModelOutput): + """ + Flax Base class for outputs of decoder-only generation models using sampling. + + + Args: + sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): + The generated sequences. + """ + + sequences: jnp.ndarray = None + + +@flax.struct.dataclass +class FlaxBeamSearchOutput(ModelOutput): + """ + Flax Base class for outputs of decoder-only generation models using greedy search. + + + Args: + sequences (`jnp.ndarray` of shape `(batch_size, max_length)`): + The generated sequences. + scores (`jnp.ndarray` of shape `(batch_size,)`): + The scores (log probabilities) of the generated sequences. + """ + + sequences: jnp.ndarray = None + scores: jnp.ndarray = None + + +@flax.struct.dataclass +class GreedyState: + cur_len: jnp.ndarray + sequences: jnp.ndarray + running_token: jnp.ndarray + is_sent_finished: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + + +@flax.struct.dataclass +class SampleState: + cur_len: jnp.ndarray + sequences: jnp.ndarray + running_token: jnp.ndarray + is_sent_finished: jnp.ndarray + prng_key: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + + +@flax.struct.dataclass +class BeamSearchState: + cur_len: jnp.ndarray + running_sequences: jnp.ndarray + running_scores: jnp.ndarray + sequences: jnp.ndarray + scores: jnp.ndarray + is_sent_finished: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + + +class FlaxGenerationMixin: + """ + A class containing all functions for auto-regressive text generation, to be used as a mixin in + [`FlaxPreTrainedModel`]. + + The class exposes [`~generation.FlaxGenerationMixin.generate`], which can be used for: + - *greedy decoding* by calling [`~generation.FlaxGenerationMixin._greedy_search`] if `num_beams=1` and + `do_sample=False` + - *multinomial sampling* by calling [`~generation.FlaxGenerationMixin._sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.FlaxGenerationMixin._beam_search`] if `num_beams>1` and + `do_sample=False` + + You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To + learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). 
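    An end-to-end sketch of the dispatch described above, assuming a Flax checkpoint such as `openai-community/gpt2` is available:

    ```python
    from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

    tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
    model = FlaxAutoModelForCausalLM.from_pretrained("openai-community/gpt2")

    inputs = tokenizer("Hello, my dog is", return_tensors="np")
    # num_beams=1 and do_sample=False -> greedy decoding under the hood.
    outputs = model.generate(inputs.input_ids, max_new_tokens=10, do_sample=False, num_beams=1)
    print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))
    ```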
+ """ + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." + ) + + @staticmethod + def _run_loop_in_debug(cond_fn, body_fn, init_state): + """ + Run generation in untraced mode. This should only be used for debugging purposes. + """ + state = init_state + while cond_fn(state): + state = body_fn(state) + return state + + def _prepare_encoder_decoder_kwargs_for_generation(self, input_ids, params, model_kwargs): + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not (argument.startswith("decoder_") or argument.startswith("cross_attn")) + } + model_kwargs["encoder_outputs"] = self.encode(input_ids, params=params, return_dict=True, **encoder_kwargs) + return model_kwargs + + def _prepare_decoder_input_ids_for_generation( + self, + batch_size: int, + decoder_start_token_id: int = None, + bos_token_id: int = None, + model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, + ) -> jnp.ndarray: + if model_kwargs is not None and "decoder_input_ids" in model_kwargs: + # Only use this arg if not None, otherwise just remove from model_kwargs + decoder_input_ids = model_kwargs.pop("decoder_input_ids") + if decoder_input_ids is not None: + return decoder_input_ids + decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + return jnp.array(decoder_start_token_id, dtype="i4").reshape(1, -1).repeat(batch_size, axis=0) + + def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: + # retrieve decoder_start_token_id for encoder-decoder models + # fall back to bos_token_id if necessary + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id is not None + else self.generation_config.decoder_start_token_id + ) + bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id + if decoder_start_token_id is not None: + return decoder_start_token_id + elif ( + hasattr(self.config, "decoder") + and hasattr(self.config.decoder, "decoder_start_token_id") + and self.config.decoder.decoder_start_token_id is not None + ): + return self.config.decoder.decoder_start_token_id + elif bos_token_id is not None: + return bos_token_id + elif ( + hasattr(self.config, "decoder") + and hasattr(self.config.decoder, "bos_token_id") + and self.config.decoder.bos_token_id is not None + ): + return self.config.decoder.bos_token_id + raise ValueError( + "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." + ) + + @staticmethod + def _expand_to_num_beams(tensor, num_beams): + return jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:]) + + def _adapt_logits_for_beam_search(self, logits): + """ + This function can be overwritten in the specific modeling_flax_.py classes to allow for custom beam + search behavior. Note that the only model that overwrites this method is [`~transformes.FlaxMarianMTModel`]. + """ + return logits + + def _validate_model_class(self): + """ + Confirms that the model class is compatible with generation. If not, raises an exception that points to the + right class to use. 
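        As an aside, the beam expansion performed by `_expand_to_num_beams` above boils down to a single broadcast; a minimal sketch with hypothetical shapes:

        ```python
        import jax.numpy as jnp

        # (batch, seq) -> (batch, num_beams, seq): each batch item is tiled across a new beam axis.
        tensor = jnp.arange(6).reshape(2, 3)
        num_beams = 4
        expanded = jnp.broadcast_to(tensor[:, None], (tensor.shape[0], num_beams) + tensor.shape[1:])
        assert expanded.shape == (2, 4, 3)
        ```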
+ """ + if not self.can_generate(): + generate_compatible_mappings = [ + FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, + FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING, + FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + ] + generate_compatible_classes = set() + for model_mapping in generate_compatible_mappings: + supported_models = model_mapping.get(type(self.config), default=None) + if supported_models is not None: + generate_compatible_classes.add(supported_models.__name__) + exception_message = ( + f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " + "it doesn't have a language model head." + ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. Generate argument typos will also be caught here.""" + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) + if "kwargs" in model_args or "model_kwargs" in model_args: + model_args |= set(inspect.signature(self.__call__).parameters) + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + + def generate( + self, + input_ids: jnp.ndarray, + generation_config: Optional[GenerationConfig] = None, + prng_key: Optional[jnp.ndarray] = None, + trace: bool = True, + params: Optional[Dict[str, jnp.ndarray]] = None, + logits_processor: Optional[FlaxLogitsProcessorList] = None, + **kwargs, + ): + r""" + Generates sequences of token ids for models with a language modeling head. + + Parameters: + input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which had the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + trace (`bool`, *optional*, defaults to `True`): + Whether to trace generation. Setting `trace=False` should only be used for debugging and will lead to a + considerably slower runtime. + params (`Dict[str, jnp.ndarray]`, *optional*): + Optionally the model parameters can be passed. Can be useful for parallelized generation. + logits_processor (`FlaxLogitsProcessorList `, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. 
This feature is intended for advanced users. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder + specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. + + Return: + [`~utils.ModelOutput`]. + + """ + # Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call + self._validate_model_class() + + # priority: `generation_config` argument > `model.generation_config` (the default generation config) + if generation_config is None: + # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior, + # two conditions must be met + # 1) the generation config must have been created from the model config (`_from_model_config` field); + # 2) the generation config must have seen no modification since its creation (the hash is the same). + if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash( + self.generation_config + ): + new_generation_config = GenerationConfig.from_model_config(self.config) + if new_generation_config != self.generation_config: + warnings.warn( + "You have modified the pretrained model configuration to control generation. This is a" + " deprecated strategy to control generation and will be removed soon, in a future version." + " Please use and modify the model generation configuration (see" + " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )" + ) + self.generation_config = new_generation_config + generation_config = self.generation_config + + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + self._validate_model_kwargs(model_kwargs.copy()) + + logits_processor = logits_processor if logits_processor is not None else FlaxLogitsProcessorList() + + # set init values + prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) + + if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: + if model_kwargs.get("attention_mask") is None: + logger.warning( + "The attention mask and the pad token id were not set. As a consequence, you may observe " + "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." + ) + eos_token_id = generation_config.eos_token_id + if isinstance(eos_token_id, list): + eos_token_id = eos_token_id[0] + logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") + generation_config.pad_token_id = eos_token_id + + if generation_config.decoder_start_token_id is None and self.config.is_encoder_decoder: + raise ValueError("`decoder_start_token_id` has to be defined for encoder-decoder generation.") + + # decoder-only models should use left-padding for generation (can't be checked with `trace=True`) + if not self.config.is_encoder_decoder and not trace: + if ( + generation_config.pad_token_id is not None + and jnp.sum(input_ids[:, -1] == generation_config.pad_token_id) > 0 + ): + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." 
+ ) + + batch_size = input_ids.shape[0] + + if self.config.is_encoder_decoder: + # add encoder_outputs to model_kwargs + if model_kwargs.get("encoder_outputs") is None: + model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, params, model_kwargs) + # prepare decoder_input_ids for generation + input_ids = self._prepare_decoder_input_ids_for_generation( + batch_size, + decoder_start_token_id=generation_config.decoder_start_token_id, + bos_token_id=generation_config.bos_token_id, + model_kwargs=model_kwargs, + ) + + # Prepare `max_length` depending on other stopping criteria. + input_ids_seq_length = input_ids.shape[-1] + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: + # 20 is the default max_length of the generation config + warnings.warn( + f"Using the model-agnostic default `max_length` (={generation_config.max_length}) " + "to control the generation length. recommend setting `max_new_tokens` to control the maximum length of the generation.", + UserWarning, + ) + elif generation_config.max_new_tokens is not None: + if not has_default_max_length and generation_config.max_length is not None: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + + if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: + raise ValueError( + f"Unfeasable length constraints: the minimum length ({generation_config.min_length}) is larger than" + f" the maximum length ({generation_config.max_length})" + ) + if input_ids_seq_length >= generation_config.max_length: + input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" + logger.warning( + f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing`max_new_tokens`." 
+ ) + + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + logits_processor=logits_processor, + ) + + if not generation_config.do_sample and generation_config.num_beams == 1: + return self._greedy_search( + input_ids, + generation_config.max_length, + generation_config.pad_token_id, + generation_config.eos_token_id, + logits_processor=logits_processor, + trace=trace, + params=params, + model_kwargs=model_kwargs, + ) + elif generation_config.do_sample and generation_config.num_beams == 1: + logits_warper = self._get_logits_warper(generation_config=generation_config) + return self._sample( + input_ids, + generation_config.max_length, + generation_config.pad_token_id, + generation_config.eos_token_id, + prng_key, + logits_warper=logits_warper, + logits_processor=logits_processor, + trace=trace, + params=params, + model_kwargs=model_kwargs, + ) + elif not generation_config.do_sample and generation_config.num_beams > 1: + # broadcast input_ids & encoder_outputs + input_ids = self._expand_to_num_beams(input_ids, num_beams=generation_config.num_beams) + + if "encoder_outputs" in model_kwargs: + model_kwargs["encoder_outputs"]["last_hidden_state"] = self._expand_to_num_beams( + model_kwargs["encoder_outputs"]["last_hidden_state"], num_beams=generation_config.num_beams + ) + + for kwarg in ["attention_mask", "decoder_attention_mask"]: + if kwarg in model_kwargs: + model_kwargs[kwarg] = self._expand_to_num_beams( + model_kwargs[kwarg], num_beams=generation_config.num_beams + ) + + return self._beam_search( + input_ids, + generation_config.max_length, + generation_config.pad_token_id, + generation_config.eos_token_id, + length_penalty=generation_config.length_penalty, + early_stopping=generation_config.early_stopping, + logits_processor=logits_processor, + trace=trace, + params=params, + num_return_sequences=generation_config.num_return_sequences, + model_kwargs=model_kwargs, + ) + else: + raise NotImplementedError("`Beam sampling is currently not implemented.") + + def _get_logits_warper(self, generation_config: GenerationConfig) -> FlaxLogitsProcessorList: + """ + This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsWarper`] + instances used for multinomial sampling. + """ + warpers = FlaxLogitsProcessorList() + + if generation_config.temperature is not None and generation_config.temperature != 1.0: + warpers.append(FlaxTemperatureLogitsWarper(generation_config.temperature)) + if generation_config.top_k is not None and generation_config.top_k != 0: + warpers.append(FlaxTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=1)) + if generation_config.top_p is not None and generation_config.top_p < 1.0: + warpers.append(FlaxTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=1)) + + return warpers + + def _get_logits_processor( + self, + generation_config: GenerationConfig, + input_ids_seq_length: int, + logits_processor: Optional[FlaxLogitsProcessorList], + ) -> FlaxLogitsProcessorList: + """ + This class returns a [`FlaxLogitsProcessorList`] list object that contains all relevant [`FlaxLogitsProcessor`] + instances used to modify the scores of the language model head. 
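        Roughly the warper chain that `_get_logits_warper` above assembles when `do_sample=True` with, say, `temperature=0.7` and `top_k=50` (hypothetical values, shown here built by hand rather than through the private helper):

        ```python
        import jax.numpy as jnp

        from transformers.generation.flax_logits_process import (
            FlaxLogitsProcessorList,
            FlaxTemperatureLogitsWarper,
            FlaxTopKLogitsWarper,
        )

        warpers = FlaxLogitsProcessorList()
        warpers.append(FlaxTemperatureLogitsWarper(0.7))
        warpers.append(FlaxTopKLogitsWarper(top_k=50, min_tokens_to_keep=1))

        # The list is applied like a single processor: temperature scaling first, then top-k filtering.
        scores = jnp.zeros((1, 100))
        warped = warpers(input_ids=None, scores=scores, cur_len=1)
        ```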
+ """ + processors = FlaxLogitsProcessorList() + + if ( + generation_config.min_length is not None + and generation_config.eos_token_id is not None + and generation_config.min_length > -1 + ): + processors.append( + FlaxMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id) + ) + if generation_config.forced_bos_token_id is not None: + processors.append(FlaxForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) + if generation_config.forced_eos_token_id is not None: + processors.append( + FlaxForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) + ) + if generation_config.suppress_tokens is not None: + processors.append(FlaxSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) + if generation_config.begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = ( + begin_index + if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) + else begin_index + 1 + ) + if generation_config.forced_decoder_ids is not None and len(generation_config.forced_decoder_ids) > 0: + # generation starts after the last token that is forced + begin_index += generation_config.forced_decoder_ids[-1][0] + processors.append( + FlaxSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) + ) + if generation_config.forced_decoder_ids is not None: + forced_decoder_ids = [ + [input_ids_seq_length + i[0] - 1, i[1]] for i in generation_config.forced_decoder_ids + ] + processors.append(FlaxForceTokensLogitsProcessor(forced_decoder_ids)) + if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: + processors.append(FlaxNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) + processors = self._merge_criteria_processor_list(processors, logits_processor) + + return processors + + def _merge_criteria_processor_list( + self, + default_list: FlaxLogitsProcessorList, + custom_list: FlaxLogitsProcessorList, + ) -> FlaxLogitsProcessorList: + if len(custom_list) == 0: + return default_list + for default in default_list: + for custom in custom_list: + if type(custom) is type(default): + object_type = "logits processor" + raise ValueError( + f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" + f" `generate`, but it has already been created with the values {default}. {default} has been" + " created by passing the corresponding arguments to generate or by the model's config default" + f" values. If you just want to change the default values of {object_type} consider passing" + f" them as arguments to `generate` instead of using a custom {object_type}." 
+ ) + default_list.extend(custom_list) + return default_list + + def _greedy_search( + self, + input_ids: None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + logits_processor: Optional[FlaxLogitsProcessorList] = None, + trace: bool = True, + params: Optional[Dict[str, jnp.ndarray]] = None, + model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, + ): + # init values + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + + batch_size, cur_len = input_ids.shape + + eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) + pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) + cur_len = jnp.array(cur_len) + + # per batch-item holding current token in loop. + sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) + sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) + + # per batch-item state bit indicating if sentence has finished. + is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) + + # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop + # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. + model = self.decode if self.config.is_encoder_decoder else self + # initialize model specific kwargs + model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) + + # initialize state + state = GreedyState( + cur_len=cur_len, + sequences=sequences, + running_token=input_ids, + is_sent_finished=is_sent_finished, + model_kwargs=model_kwargs, + ) + + def greedy_search_cond_fn(state): + """state termination condition fn.""" + has_reached_max_length = state.cur_len == max_length + all_sequence_finished = jnp.all(state.is_sent_finished) + finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) + return ~finish_generation + + def greedy_search_body_fn(state): + """state update fn.""" + model_outputs = model(state.running_token, params=params, **state.model_kwargs) + logits = model_outputs.logits[:, -1] + + # apply min_length, ... 
+ logits = logits_processor(state.sequences, logits, state.cur_len) + + next_token = jnp.argmax(logits, axis=-1) + + next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished + next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) + next_token = next_token[:, None] + + next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) + next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) + return GreedyState( + cur_len=state.cur_len + 1, + sequences=next_sequences, + running_token=next_token, + is_sent_finished=next_is_sent_finished, + model_kwargs=next_model_kwargs, + ) + + # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU + if input_ids.shape[1] > 1: + state = greedy_search_body_fn(state) + + if not trace: + state = self._run_loop_in_debug(greedy_search_cond_fn, greedy_search_body_fn, state) + else: + state = lax.while_loop(greedy_search_cond_fn, greedy_search_body_fn, state) + + return FlaxGreedySearchOutput(sequences=state.sequences) + + def _sample( + self, + input_ids: None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + prng_key: Optional[jnp.ndarray] = None, + logits_processor: Optional[FlaxLogitsProcessorList] = None, + logits_warper: Optional[FlaxLogitsProcessorList] = None, + trace: bool = True, + params: Optional[Dict[str, jnp.ndarray]] = None, + model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, + ): + # init values + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) + + batch_size, cur_len = input_ids.shape + + eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) + pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) + cur_len = jnp.array(cur_len) + + # per batch-item holding current token in loop. + sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32) + sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0)) + + # per batch-item state bit indicating if sentence has finished. + is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_) + + # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop + # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. 
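Both the greedy and sampling bodies above keep already-finished sequences frozen with the same arithmetic mask: `next_token * ~is_sent_finished + pad_token_id * is_sent_finished`. A plain-Python sketch of that trick, with made-up values:

```python
pad_token_id = 0
candidates = [12, 7, 99]           # tokens proposed at this step
finished = [False, True, False]    # sequences that already emitted EOS

# Same arithmetic as the jnp expression: finished rows receive the pad token.
next_tokens = [tok * (not done) + pad_token_id * done for tok, done in zip(candidates, finished)]
print(next_tokens)  # [12, 0, 99]
```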
+ model = self.decode if self.config.is_encoder_decoder else self + + # initialize model specific kwargs + model_kwargs = self.prepare_inputs_for_generation(input_ids, max_length, **model_kwargs) + + # initialize state + state = SampleState( + cur_len=cur_len, + sequences=sequences, + running_token=input_ids, + is_sent_finished=is_sent_finished, + prng_key=prng_key, + model_kwargs=model_kwargs, + ) + + def sample_search_cond_fn(state): + """state termination condition fn.""" + has_reached_max_length = state.cur_len == max_length + all_sequence_finished = jnp.all(state.is_sent_finished) + finish_generation = jnp.logical_or(has_reached_max_length, all_sequence_finished) + return ~finish_generation + + def sample_search_body_fn(state): + """state update fn.""" + prng_key, prng_key_next = jax.random.split(state.prng_key) + model_outputs = model(state.running_token, params=params, **state.model_kwargs) + + logits = model_outputs.logits[:, -1] + + # apply min_length, ... + logits = logits_processor(state.sequences, logits, state.cur_len) + # apply top_p, top_k, temperature + logits = logits_warper(logits, logits, state.cur_len) + + next_token = jax.random.categorical(prng_key, logits, axis=-1) + + next_token = next_token * ~state.is_sent_finished + pad_token_id * state.is_sent_finished + next_is_sent_finished = state.is_sent_finished | (next_token == eos_token_id) + next_token = next_token[:, None] + + next_sequences = lax.dynamic_update_slice(state.sequences, next_token, (0, state.cur_len)) + next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) + + return SampleState( + cur_len=state.cur_len + 1, + sequences=next_sequences, + running_token=next_token, + is_sent_finished=next_is_sent_finished, + model_kwargs=next_model_kwargs, + prng_key=prng_key_next, + ) + + # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU + if input_ids.shape[1] > 1: + state = sample_search_body_fn(state) + + if not trace: + state = self._run_loop_in_debug(sample_search_cond_fn, sample_search_body_fn, state) + else: + state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state) + + return FlaxSampleOutput(sequences=state.sequences) + + def _beam_search( + self, + input_ids: None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + length_penalty: Optional[float] = None, + early_stopping: Optional[Union[bool, str]] = None, + logits_processor: Optional[FlaxLogitsProcessorList] = None, + trace: bool = True, + params: Optional[Dict[str, jnp.ndarray]] = None, + num_return_sequences: Optional[int] = None, + model_kwargs: Optional[Dict[str, jnp.ndarray]] = None, + ): + """ + This beam search function is heavily inspired by Flax's official example: + https://github.com/google/flax/blob/main/examples/wmt/decode.py + """ + + def flatten_beam_dim(tensor): + """Flattens the first two dimensions of a non-scalar array.""" + # ignore scalars (e.g. cache index) + if tensor.ndim == 0: + return tensor + return tensor.reshape((tensor.shape[0] * tensor.shape[1],) + tensor.shape[2:]) + + def unflatten_beam_dim(tensor, batch_size, num_beams): + """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" + # ignore scalars (e.g. 
cache index) + if tensor.ndim == 0: + return tensor + return tensor.reshape((batch_size, num_beams) + tensor.shape[1:]) + + def gather_beams(nested, beam_indices, batch_size, new_num_beams): + """ + Gathers the beam slices indexed by beam_indices into new beam array. + """ + batch_indices = jnp.reshape( + jnp.arange(batch_size * new_num_beams) // new_num_beams, (batch_size, new_num_beams) + ) + + def gather_fn(tensor): + # ignore scalars (e.g. cache index) + if tensor.ndim == 0: + return tensor + else: + return tensor[batch_indices, beam_indices] + + return jax.tree_util.tree_map(gather_fn, nested) + + # init values + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty + early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping + num_return_sequences = ( + num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences + ) + + batch_size, num_beams, cur_len = input_ids.shape + + eos_token_id = jnp.array(eos_token_id, dtype=jnp.int32 if eos_token_id is not None else None) + pad_token_id = jnp.array(pad_token_id, dtype=jnp.int32) + cur_len = jnp.array(cur_len) + + # record the prompt length of decoder + decoder_prompt_len = input_ids.shape[-1] + + # per batch,beam-item holding current token in loop. + sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) + running_sequences = jnp.full((batch_size, num_beams, max_length), pad_token_id, dtype=jnp.int32) + running_sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0, 0)) + + # per batch,beam-item state bit indicating if sentence has finished. + is_sent_finished = jnp.zeros((batch_size, num_beams), dtype=jnp.bool_) + + # per batch,beam-item score, logprobs + running_scores = jnp.tile(jnp.array([0.0] + [np.array(-1.0e7)] * (num_beams - 1)), [batch_size, 1]) + scores = jnp.ones((batch_size, num_beams)) * np.array(-1.0e7) + + # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop + # and pass it the `encoder_outputs`, which are part of the `model_kwargs`. + model = self.decode if self.config.is_encoder_decoder else self + + # flatten beam dim + if "encoder_outputs" in model_kwargs: + model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( + model_kwargs["encoder_outputs"]["last_hidden_state"] + ) + for kwarg in ["attention_mask", "decoder_attention_mask"]: + if kwarg in model_kwargs: + model_kwargs[kwarg] = flatten_beam_dim(model_kwargs[kwarg]) + + # initialize model specific kwargs + model_kwargs = self.prepare_inputs_for_generation(flatten_beam_dim(input_ids), max_length, **model_kwargs) + + # initialize state + state = BeamSearchState( + cur_len=cur_len, + running_sequences=running_sequences, + running_scores=running_scores, + sequences=sequences, + scores=scores, + is_sent_finished=is_sent_finished, + model_kwargs=model_kwargs, + ) + + def beam_search_cond_fn(state): + """beam search state termination condition fn.""" + + # 1. is less than max length? + not_max_length_yet = state.cur_len < max_length + + # 2. can the new beams still improve? 
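The beam-search helpers above (`flatten_beam_dim`, `unflatten_beam_dim`, `gather_beams`) are reshape and fancy-indexing operations. A small NumPy sketch with toy shapes (not the library's code) shows the idea:

```python
import numpy as np

batch_size, num_beams, seq_len = 2, 3, 4
x = np.arange(batch_size * num_beams * seq_len).reshape(batch_size, num_beams, seq_len)

flat = x.reshape(batch_size * num_beams, seq_len)    # flatten_beam_dim: (batch*beam, ...) for the model
back = flat.reshape(batch_size, num_beams, seq_len)  # unflatten_beam_dim: recover per-beam structure
assert (back == x).all()

# gather_beams: per batch item, select which old beams survive into the new beam set
beam_indices = np.array([[2, 0, 1], [1, 1, 0]])
gathered = x[np.arange(batch_size)[:, None], beam_indices]
print(gathered.shape)  # (2, 3, 4)
```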
+ # early_stopping == False -> apply heuristic = always get the best score from `cur_len`. See the discussion + # below for more details. + # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 + # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of + # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. + if early_stopping == "never" and length_penalty > 0.0: + best_running_score = state.running_scores[:, :1] / ( + (max_length - decoder_prompt_len) ** length_penalty + ) + else: + best_running_score = state.running_scores[:, :1] / ( + (state.cur_len - decoder_prompt_len) ** length_penalty + ) + worst_finished_score = jnp.where( + state.is_sent_finished, jnp.min(state.scores, axis=1, keepdims=True), np.array(-1.0e7) + ) + improvement_still_possible = jnp.any(best_running_score > worst_finished_score) + + # 3. is there still a beam that has not finished? + still_open_beam = ~(jnp.all(state.is_sent_finished) & (early_stopping is True)) + + return not_max_length_yet & still_open_beam & improvement_still_possible + + def beam_search_body_fn(state, input_ids_length=1): + """beam search state update fn.""" + # 1. Forward current tokens + # Collect the current position slice along length to feed the fast + # autoregressive decoder model. Flatten the beam dimension into batch + # dimension for feeding into the model. + # unflatten beam dimension + # Unflatten beam dimension in attention cache arrays + input_token = flatten_beam_dim( + lax.dynamic_slice( + state.running_sequences, + (0, 0, state.cur_len - input_ids_length), + (batch_size, num_beams, input_ids_length), + ) + ) + model_outputs = model(input_token, params=params, **state.model_kwargs) + + logits = unflatten_beam_dim(model_outputs.logits[:, -1], batch_size, num_beams) + cache = jax.tree_util.tree_map( + lambda tensor: unflatten_beam_dim(tensor, batch_size, num_beams), model_outputs.past_key_values + ) + + # adapt logits for FlaxMarianMTModel + logits = self._adapt_logits_for_beam_search(logits) + + # 2. Compute log probs + # get log probabilities from logits, + # process logits with processors (*e.g.* min_length, ...), and + # add new logprobs to existing running logprobs scores. + log_probs = jax.nn.log_softmax(logits) + log_probs = logits_processor( + flatten_beam_dim(state.running_sequences), flatten_beam_dim(log_probs), state.cur_len + ) + log_probs = unflatten_beam_dim(log_probs, batch_size, num_beams) + log_probs = log_probs + jnp.expand_dims(state.running_scores, axis=2) + vocab_size = log_probs.shape[2] + log_probs = log_probs.reshape((batch_size, num_beams * vocab_size)) + + # 3. Retrieve top-K + # Each item in batch has num_beams * vocab_size candidate sequences. + # For each item, get the top 2*k candidates with the highest log- + # probabilities. We gather the top 2*K beams here so that even if the best + # K sequences reach EOS simultaneously, we have another K sequences + # remaining to continue the live beam search. + # Gather the top 2*K scores from _all_ beams. + # Gather 2*k top beams. + # Recover the beam index by floor division. + # Recover token id by modulo division and expand Id array for broadcasting. + # Update sequences for the 2*K top-k new sequences. 
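The comment block above explains why 2*K candidates are gathered per batch item: even if the K best continuations all end in EOS, K live beams remain for the next step. A toy, framework-free illustration with invented scores:

```python
import heapq

num_beams = 2
candidates = [  # (log_prob, token) pairs for one batch item, toy values
    (-0.1, "<eos>"), (-0.2, "<eos>"), (-0.3, "cat"), (-0.9, "dog"), (-1.5, "fox"),
]

top_2k = heapq.nlargest(2 * num_beams, candidates)
finished = [c for c in top_2k if c[1] == "<eos>"][:num_beams]
running = [c for c in top_2k if c[1] != "<eos>"][:num_beams]
print(finished)  # [(-0.1, '<eos>'), (-0.2, '<eos>')]
print(running)   # [(-0.3, 'cat'), (-0.9, 'dog')] -> the search can still continue
```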
+ beams_to_keep = 2 * num_beams + topk_log_probs, topk_indices = lax.top_k(log_probs, k=beams_to_keep) + topk_beam_indices = topk_indices // vocab_size + topk_running_sequences = gather_beams( + state.running_sequences, topk_beam_indices, batch_size, beams_to_keep + ) + topk_ids = jnp.expand_dims(topk_indices % vocab_size, axis=2) + topk_sequences = lax.dynamic_update_slice(topk_running_sequences, topk_ids, (0, 0, state.cur_len)) + + # 4. Check which sequences have ended + # Update current sequences: + # Did any of these sequences reach an end marker? + # To prevent these just finished sequences from being added to the current sequences + # set of active beam search sequences, set their log probs to a very large + # negative value. + did_topk_just_finished = topk_sequences[:, :, state.cur_len] == eos_token_id + running_topk_log_probs = topk_log_probs + did_topk_just_finished * np.array(-1.0e7) + # 5. Get running sequences scores for next + # Determine the top k beam indices (from top 2*k beams) from log probs + # and gather top k beams (from top 2*k beams). + next_topk_indices = lax.top_k(running_topk_log_probs, k=num_beams)[1] + next_running_sequences, next_running_scores = gather_beams( + [topk_sequences, running_topk_log_probs], next_topk_indices, batch_size, num_beams + ) + + # 6. Process topk logits + # Further process log probs: + # - add length penalty + # - make sure no scores can be added anymore if beam is full + # - make sure still running sequences cannot be chosen as finalized beam + topk_log_probs = topk_log_probs / ((state.cur_len + 1 - decoder_prompt_len) ** length_penalty) + beams_in_batch_are_full = jnp.broadcast_to( + state.is_sent_finished.all(axis=-1, keepdims=True), did_topk_just_finished.shape + ) & (early_stopping is True) + add_penalty = ~did_topk_just_finished | beams_in_batch_are_full + topk_log_probs += add_penalty * np.array(-1.0e7) + + # 7. Get scores, sequences, is sentence finished for next. + # Combine sequences, scores, and flags along the beam dimension and compare + # new finished sequence scores to existing finished scores and select the + # best from the new set of beams + merged_sequences = jnp.concatenate([state.sequences, topk_sequences], axis=1) + merged_scores = jnp.concatenate([state.scores, topk_log_probs], axis=1) + merged_is_sent_finished = jnp.concatenate([state.is_sent_finished, did_topk_just_finished], axis=1) + topk_merged_indices = lax.top_k(merged_scores, k=num_beams)[1] + next_sequences, next_scores, next_is_sent_finished = gather_beams( + [merged_sequences, merged_scores, merged_is_sent_finished], topk_merged_indices, batch_size, num_beams + ) + + # 8. Update model kwargs. + # Determine the top k beam indices from the original set of all beams. + # With these, gather the top k beam-associated caches. 
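Step 6 above normalizes finished-beam scores by `(cur_len + 1 - decoder_prompt_len) ** length_penalty`. A short worked example (arbitrary numbers) shows how the penalty trades off sequence length:

```python
sum_log_prob = -6.0               # summed log-probability of a finished hypothesis
decoder_prompt_len, cur_len = 4, 11

for length_penalty in (0.0, 1.0, 2.0):
    score = sum_log_prob / ((cur_len + 1 - decoder_prompt_len) ** length_penalty)
    print(length_penalty, round(score, 3))
# 0.0 -6.0     (raw sum, short sequences win)
# 1.0 -0.75    (per-token average)
# 2.0 -0.094   (longer sequences are favoured even more)
```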
+ next_running_indices = gather_beams(topk_beam_indices, next_topk_indices, batch_size, num_beams) + next_cache = gather_beams(cache, next_running_indices, batch_size, num_beams) + model_outputs["past_key_values"] = jax.tree_util.tree_map(lambda x: flatten_beam_dim(x), next_cache) + next_model_kwargs = self.update_inputs_for_generation(model_outputs, state.model_kwargs) + + return BeamSearchState( + cur_len=state.cur_len + 1, + running_scores=next_running_scores, + running_sequences=next_running_sequences, + scores=next_scores, + sequences=next_sequences, + is_sent_finished=next_is_sent_finished, + model_kwargs=next_model_kwargs, + ) + + # Always run first iteration outside of `lax.while_loop` to avoid calling `beam_search_cond_fn` + # when `state.cur_len` equals `decoder_prompt_len`. This also helps to comply with TPU when + # the very first prompt has sequence length > 1. + state = partial(beam_search_body_fn, input_ids_length=input_ids.shape[-1])(state) + + if not trace: + state = self._run_loop_in_debug(beam_search_cond_fn, beam_search_body_fn, state) + else: + state = lax.while_loop(beam_search_cond_fn, beam_search_body_fn, state) + + # Account for the edge-case where there are no finished sequences for a + # particular batch item. If so, return running sequences for that batch item. + none_finished = jnp.any(state.is_sent_finished, axis=1) + sequences = jnp.where(none_finished[:, None, None], state.sequences, state.running_sequences) + scores = jnp.where(none_finished[:, None], state.scores, state.running_scores) + + # Take best beams for each batch (the score is sorted in descending order) + sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) + scores = flatten_beam_dim(scores[:, :num_return_sequences]) + + return FlaxBeamSearchOutput(sequences=sequences, scores=scores) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/logits_process.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/logits_process.py new file mode 100644 index 0000000000000000000000000000000000000000..ce91e8a40a4e21da3f490d1f074c272b3cecac4e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/logits_process.py @@ -0,0 +1,2221 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +import math +import warnings +from typing import Callable, Dict, Iterable, List, Optional, Tuple, Union + +import numpy as np +import torch + +from ..utils import add_start_docstrings +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. 
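The end of `_beam_search` above falls back to the running beams for any batch item that never produced a finished sequence, via a broadcasted `where`. A NumPy sketch of that selection with toy arrays:

```python
import numpy as np

none_finished = np.array([True, False])   # per batch item: does a finished beam exist?
finished_seqs = np.full((2, 3, 4), 1)     # toy "sequences" tensor
running_seqs = np.full((2, 3, 4), 2)      # toy "running_sequences" tensor

chosen = np.where(none_finished[:, None, None], finished_seqs, running_seqs)
print(chosen[:, 0, 0])  # [1 2] -> the second batch item falls back to its running beams
```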
These can be logits for each vocabulary when not using beam + search or log softmax for each vocabulary token when using beam search + + Return: + `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. + +""" + + +class LogitsProcessor: + """Abstract base class for all logit processors that can be applied during generation.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class LogitsWarper: + """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class LogitsProcessorList(list): + """ + This class can be used to create a list of [`LogitsProcessor`] or [`LogitsWarper`] to subsequently process a + `scores` input tensor. This class inherits from list and adds a specific *__call__* method to apply each + [`LogitsProcessor`] or [`LogitsWarper`] to the inputs. + """ + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + kwargs (`Dict[str, Any]`, *optional*): + Additional kwargs that are specific to a logits processor. + + Return: + `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: + The processed prediction scores. + + """ + for processor in self: + function_args = inspect.signature(processor.__call__).parameters + if len(function_args) > 2: + if not all(arg in kwargs for arg in list(function_args.keys())[2:]): + raise ValueError( + f"Make sure that all the required parameters: {list(function_args.keys())} for " + f"{processor.__class__} are passed to the logits processor." + ) + scores = processor(input_ids, scores, **kwargs) + else: + scores = processor(input_ids, scores) + + return scores + + +class MinLengthLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] enforcing a min-length by setting EOS probability to 0. Note that, for decoder-only models + like most LLMs, the length includes the prompt. + + Args: + min_length (`int`): + The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. + eos_token_id (`Union[int, List[int]]`): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. 
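`LogitsProcessor` and `LogitsProcessorList` above define the interface every processor in this file implements: a callable taking `(input_ids, scores)` and returning modified scores. A minimal custom processor written against that public interface; the class and the banned token id below are made up for illustration:

```python
import torch
from transformers import LogitsProcessor, LogitsProcessorList


class BanTokenLogitsProcessor(LogitsProcessor):
    """Illustrative processor: forbid a single token id by setting its score to -inf."""

    def __init__(self, banned_token_id: int):
        self.banned_token_id = banned_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
        scores = scores.clone()
        scores[:, self.banned_token_id] = float("-inf")
        return scores


processors = LogitsProcessorList([BanTokenLogitsProcessor(banned_token_id=3)])
print(processors(torch.tensor([[1, 2]]), torch.zeros(1, 10)))  # column 3 is -inf
```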
+ + Examples: + + ```python + >>> from transformers import AutoModelForCausalLM, AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer("A number:", return_tensors="pt") + >>> gen_out = model.generate(**inputs) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one + + >>> # setting `min_length` to a value smaller than the uncontrolled output length has no impact + >>> gen_out = model.generate(**inputs, min_length=3) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one + + >>> # setting a larger `min_length` will force the model to generate beyond its natural ending point, which is not + >>> # necessarily incorrect + >>> gen_out = model.generate(**inputs, min_length=10) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one thousand, nine hundred and ninety-four + ``` + """ + + def __init__(self, min_length: int, eos_token_id: Union[int, List[int]]): + if not isinstance(min_length, int) or min_length < 0: + raise ValueError(f"`min_length` has to be a non-negative integer, but is {min_length}") + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id): + logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}") + + self.min_length = min_length + self.eos_token_id = eos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + vocab_tensor = torch.arange(scores.shape[-1], device=scores.device) + eos_token_id = torch.tensor(self.eos_token_id, device=scores.device) + eos_token_mask = torch.isin(vocab_tensor, eos_token_id) + scores_processed = scores.clone() + if input_ids.shape[-1] < self.min_length: + scores_processed = torch.where(eos_token_mask, -math.inf, scores) + return scores_processed + + +class MinNewTokensLengthLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] enforcing a min-length of new tokens by setting EOS (End-Of-Sequence) token probability to 0. + Contrarily to [`MinLengthLogitsProcessor`], this processor ignores the prompt. + + Args: + prompt_length_to_skip (`int`): + The input tokens length. Not a valid argument when used with `generate` as it will automatically assign the + input length. + min_new_tokens (`int`): + The minimum *new* tokens length below which the score of `eos_token_id` is set to `-float("Inf")`. + eos_token_id (`Union[int, List[int]]`): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. 
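`MinLengthLogitsProcessor` above masks the EOS scores while the sequence is still too short. A simplified torch sketch of that masking step, with a toy vocabulary size and ids:

```python
import torch

scores = torch.zeros(1, 5)                       # toy vocab of 5 tokens
eos_token_id, min_length, current_length = 4, 3, 1

if current_length < min_length:
    # same effect as the torch.isin / torch.where masking above, for a single EOS id
    scores = scores.masked_fill(torch.arange(5) == eos_token_id, float("-inf"))
print(scores)  # EOS column is -inf, all other scores untouched
```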
+ + Examples: + + ```python + >>> from transformers import AutoModelForCausalLM, AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer(["A number:"], return_tensors="pt") + >>> gen_out = model.generate(**inputs) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one + + >>> # setting `min_new_tokens` will force the model to generate beyond its natural ending point, which is not + >>> # necessarily incorrect + >>> gen_out = model.generate(**inputs, min_new_tokens=2) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + A number: one thousand + ``` + """ + + def __init__(self, prompt_length_to_skip: int, min_new_tokens: int, eos_token_id: Union[int, List[int]]): + for arg_name, arg_value in [ + ("prompt_length_to_skip", prompt_length_to_skip), + ("min_new_tokens", min_new_tokens), + ]: + if not isinstance(arg_value, int) or arg_value < 0: + raise ValueError(f"`{arg_name}` has to be a positive integer, but is {arg_value}") + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + if not all(isinstance(i, int) for i in eos_token_id) or any(i < 0 for i in eos_token_id): + logger.warning(f"`eos_token_id` has to be a list of positive integers, but is {eos_token_id}") + + self.prompt_length_to_skip = prompt_length_to_skip + self.min_new_tokens = min_new_tokens + self.eos_token_id = eos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + new_tokens_length = input_ids.shape[-1] - self.prompt_length_to_skip + scores_processed = scores.clone() + vocab_tensor = torch.arange(scores.shape[-1], device=scores.device) + eos_token_id = torch.tensor(self.eos_token_id, device=scores.device) + eos_token_mask = torch.isin(vocab_tensor, eos_token_id) + if new_tokens_length < self.min_new_tokens: + scores_processed = torch.where(eos_token_mask, -math.inf, scores) + + return scores_processed + + +class TemperatureLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] for temperature (exponential scaling output probability distribution), which effectively means + that it can control the randomness of the predicted tokens. Often used together with [`TopPLogitsWarper`] and + [`TopKLogitsWarper`]. + + + + Make sure that `do_sample=True` is included in the `generate` arguments otherwise the temperature value won't have + any effect. + + + + Args: + temperature (`float`): + Strictly positive float value used to modulate the logits distribution. A value smaller than `1` decreases + randomness (and vice versa), with `0` being equivalent to shifting all probability mass to the most likely + token. + + Examples: + + ```python + >>> import torch + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(0) # for reproducibility + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> model.config.pad_token_id = model.config.eos_token_id + >>> inputs = tokenizer(["Hugging Face Company is"], return_tensors="pt") + + >>> # With temperature=1.0, the default, we consistently get random outputs due to random sampling. 
+ >>> generate_kwargs = {"max_new_tokens": 10, "do_sample": True, "temperature": 1.0, "num_return_sequences": 2} + >>> outputs = model.generate(**inputs, **generate_kwargs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) + ['Hugging Face Company is one of these companies that is going to take a', + "Hugging Face Company is a brand created by Brian A. O'Neil"] + + >>> # However, with temperature close to 0, it approximates greedy decoding strategies (invariant) + >>> generate_kwargs["temperature"] = 0.0001 + >>> outputs = model.generate(**inputs, **generate_kwargs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)) + ['Hugging Face Company is a company that has been around for over 20 years', + 'Hugging Face Company is a company that has been around for over 20 years'] + ``` + """ + + def __init__(self, temperature: float): + if not isinstance(temperature, float) or not (temperature > 0): + except_msg = ( + f"`temperature` (={temperature}) has to be a strictly positive float, otherwise your next token " + "scores will be invalid." + ) + if isinstance(temperature, float) and temperature == 0.0: + except_msg += " If you're looking for greedy decoding strategies, set `do_sample=False`." + raise ValueError(except_msg) + + self.temperature = temperature + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + scores_processed = scores / self.temperature + return scores_processed + + +class RepetitionPenaltyLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that prevents the repetition of previous tokens through a penalty. This penalty is applied at + most once per token. Note that, for decoder-only models like most LLMs, the considered tokens include the prompt. + + In the original [paper](https://arxiv.org/pdf/1909.05858.pdf), the authors suggest the use of a penalty of around + 1.2 to achieve a good balance between truthful generation and lack of repetition. To penalize and reduce + repetition, use `penalty` values above 1.0, where a higher value penalizes more strongly. To reward and encourage + repetition, use `penalty` values between 0.0 and 1.0, where a lower value rewards more strongly. + + Args: + penalty (`float`): + The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 penalizes previously generated + tokens. Between 0.0 and 1.0 rewards previously generated tokens. + + Examples: + + ```py + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> # Initializing the model and tokenizer for it + >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") + >>> inputs = tokenizer(["I'm not going to"], return_tensors="pt") + + >>> # This shows a normal generate without any specific parameters + >>> summary_ids = model.generate(**inputs) + >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]) + I'm not going to be able to do that. I'm going to be able to do that + + >>> # This generates a penalty for repeated tokens + >>> penalized_ids = model.generate(**inputs, repetition_penalty=1.1) + >>> print(tokenizer.batch_decode(penalized_ids, skip_special_tokens=True)[0]) + I'm not going to be able to do that. 
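`TemperatureLogitsWarper` above simply divides the scores by `temperature` before softmax. A framework-free worked example of how that sharpens or flattens the distribution, using toy logits:

```python
import math


def softmax(xs):
    exps = [math.exp(x) for x in xs]
    total = sum(exps)
    return [round(e / total, 3) for e in exps]


logits = [2.0, 1.0, 0.5]
for temperature in (0.5, 1.0, 2.0):
    print(temperature, softmax([x / temperature for x in logits]))
# temperature < 1 concentrates mass on the top token; temperature > 1 spreads it out
```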
I'll just have to go out and play + ``` + """ + + def __init__(self, penalty: float): + if not isinstance(penalty, float) or not (penalty > 0): + raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") + + self.penalty = penalty + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + score = torch.gather(scores, 1, input_ids) + + # if score < 0 then repetition penalty has to be multiplied to reduce the token probabilities + score = torch.where(score < 0, score * self.penalty, score / self.penalty) + + scores_processed = scores.scatter(1, input_ids, score) + return scores_processed + + +class EncoderRepetitionPenaltyLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that works similarly to [`RepetitionPenaltyLogitsProcessor`], but with an *inverse* penalty + that is applied to the tokens present in the prompt. In other words, a penalty above 1.0 increases the odds of + selecting tokens that were present in the prompt. + + It was designed to avoid hallucination in input-grounded tasks, like summarization. Although originally intended + for encoder-decoder models, it can also be used with decoder-only models like LLMs. + + Args: + penalty (`float`): + The parameter for repetition penalty. 1.0 means no penalty. Above 1.0 rewards prompt tokens. Between 0.0 + and 1.0 penalizes prompt tokens. + encoder_input_ids (`torch.LongTensor`): + The encoder_input_ids that should be repeated within the decoder ids. + + Examples: + + ```python + >>> from transformers import AutoModelForCausalLM, AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer(["Alice and Bob. The third member's name was"], return_tensors="pt") + >>> gen_out = model.generate(**inputs) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + Alice and Bob. The third member's name was not mentioned. + + >>> # With the `encoder_repetition_penalty` argument we can trigger this logits processor in `generate`, which can + >>> # promote the use of prompt tokens ("Bob" in this example) + >>> gen_out = model.generate(**inputs, encoder_repetition_penalty=1.2) + >>> print(tokenizer.batch_decode(gen_out, skip_special_tokens=True)[0]) + Alice and Bob. The third member's name was Bob. The third member's name was Bob. + ``` + """ + + def __init__(self, penalty: float, encoder_input_ids: torch.LongTensor): + if not isinstance(penalty, float) or not (penalty > 0): + raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") + + self.penalty = 1 / penalty + self.encoder_input_ids = encoder_input_ids + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + score = torch.gather(scores, 1, self.encoder_input_ids) + + # if score < 0 then hallucination penalty has to be multiplied to increase the token probabilities + score = torch.where(score < 0, score * self.penalty, score / self.penalty) + + scores_processed = scores.scatter(1, self.encoder_input_ids, score) + return scores_processed + + +class TopPLogitsWarper(LogitsWarper): + """ + [`LogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. Often + used together with [`TemperatureLogitsWarper`] and [`TopKLogitsWarper`]. 
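The repetition penalty above is sign-aware: positive scores of previously seen tokens are divided by `penalty`, negative ones are multiplied, so the token always becomes less likely. A two-line worked example with invented scores:

```python
penalty = 1.2
for score in (2.0, -2.0):
    penalised = score * penalty if score < 0 else score / penalty
    print(score, "->", round(penalised, 3))
# 2.0 -> 1.667   (pushed down)
# -2.0 -> -2.4   (pushed further down)
```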
+ + Args: + top_p (`float`): + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(1) + >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") + + >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3 | < 4 (left-hand pointer) ; + + + + >>> # With `top_p` sampling, the output gets restricted to high-probability tokens. + >>> # Pro tip: In practice, LLMs use `top_p` in the 0.9-0.95 range. + >>> outputs = model.generate(**inputs, do_sample=True, top_p=0.1) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9 + ``` + """ + + def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + top_p = float(top_p) + if top_p < 0 or top_p > 1.0: + raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") + if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): + raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") + + self.top_p = top_p + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + sorted_logits, sorted_indices = torch.sort(scores, descending=False) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + + # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) + sorted_indices_to_remove = cumulative_probs <= (1 - self.top_p) + # Keep at least min_tokens_to_keep + sorted_indices_to_remove[..., -self.min_tokens_to_keep :] = 0 + + # scatter sorted tensors to original indexing + indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) + scores_processed = scores.masked_fill(indices_to_remove, self.filter_value) + return scores_processed + + +class TopKLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. Often used together + with [`TemperatureLogitsWarper`] and [`TopPLogitsWarper`]. + + Args: + top_k (`int`): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. 
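`TopPLogitsWarper` above keeps the smallest set of tokens whose probabilities sum to at least `top_p`. A framework-free sketch of that selection rule with toy probabilities, always keeping at least one token:

```python
def top_p_keep(probs, top_p):
    """Return the indices kept by nucleus filtering."""
    order = sorted(range(len(probs)), key=lambda i: probs[i], reverse=True)
    kept, cumulative = [], 0.0
    for i in order:
        kept.append(i)
        cumulative += probs[i]
        if cumulative >= top_p:
            break
    return kept


print(top_p_keep([0.5, 0.3, 0.1, 0.1], top_p=0.8))  # [0, 1] -> 0.5 + 0.3 reaches the cutoff
```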
+ + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(1) + >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") + + >>> inputs = tokenizer("A sequence: A, B, C, D", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: A, B, C, D, E — S — O, P — R + + >>> # With `top_k` sampling, the output gets restricted the k most likely tokens. + >>> # Pro tip: In practice, LLMs use `top_k` in the 5-50 range. + >>> outputs = model.generate(**inputs, do_sample=True, top_k=2) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: A, B, C, D, E, F, G, H, I + ``` + """ + + def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") + + self.top_k = max(top_k, min_tokens_to_keep) + self.filter_value = filter_value + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + top_k = min(self.top_k, scores.size(-1)) # Safety check + # Remove all tokens with a probability less than the last token of the top-k + indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None] + scores_processed = scores.masked_fill(indices_to_remove, self.filter_value) + return scores_processed + + +class TypicalLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs typical decoding. Inspired on how humans use language, it prioritizes tokens whose + log probability is close to the entropy of the token probability distribution. This means that the most likely + tokens may be discarded in the process. + + See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information. + + Args: + mass (`float`, *optional*, defaults to 0.9): + Value of typical_p between 0 and 1 inclusive, defaults to 0.9. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer("1, 2, 3", return_tensors="pt") + + >>> # We can see that greedy decoding produces a sequence of numbers + >>> outputs = model.generate(**inputs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, + + >>> # For this particular seed, we can see that sampling produces nearly the same low-information (= low entropy) + >>> # sequence + >>> set_seed(18) + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + 1, 2, 3, 4, 5, 6, 7, 8, 9 and 10 + + >>> # With `typical_p` set, the most obvious sequence is no longer produced, which may be good for your problem + >>> set_seed(18) + >>> outputs = model.generate( + ... 
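`TopKLogitsWarper` above masks every score below the k-th largest one. The same rule in plain Python, with toy scores:

```python
scores = [1.2, -0.3, 0.7, 2.5, 0.1]
top_k = 2

threshold = sorted(scores, reverse=True)[top_k - 1]   # k-th largest score
filtered = [s if s >= threshold else float("-inf") for s in scores]
print(filtered)  # [1.2, -inf, -inf, 2.5, -inf] -> only the two best tokens survive
```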
**inputs, do_sample=True, typical_p=0.1, return_dict_in_generate=True, output_scores=True + ... ) + >>> print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]) + 1, 2, 3 and 5 + + >>> # We can see that the token corresponding to "4" (token 934) in the second position, the most likely token + >>> # as seen with greedy decoding, was entirely blocked out + >>> print(outputs.scores[1][0, 934]) + tensor(-inf) + ``` + """ + + def __init__(self, mass: float = 0.9, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + mass = float(mass) + if not (mass > 0 and mass < 1): + raise ValueError(f"`typical_p` has to be a float > 0 and < 1, but is {mass}") + if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): + raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") + + self.filter_value = filter_value + self.mass = mass + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # calculate entropy + normalized = torch.nn.functional.log_softmax(scores, dim=-1) + p = torch.exp(normalized) + ent = -(normalized * p).nansum(-1, keepdim=True) + + # shift and sort + shifted_scores = torch.abs((-normalized) - ent) + sorted_scores, sorted_indices = torch.sort(shifted_scores, descending=False) + sorted_logits = scores.gather(-1, sorted_indices) + cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) + + # Remove tokens with cumulative mass above the threshold + last_ind = (cumulative_probs < self.mass).sum(dim=1) + last_ind.clamp_(max=sorted_scores.shape[-1] - 1) + sorted_indices_to_remove = sorted_scores > sorted_scores.gather(1, last_ind.view(-1, 1)) + sorted_indices_to_remove[..., : self.min_tokens_to_keep] = 0 + indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) + + scores_processed = scores.masked_fill(indices_to_remove, self.filter_value) + return scores_processed + + +class EpsilonLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs epsilon-sampling, i.e. restricting to tokens with `prob >= epsilon`. Takes the + largest min_tokens_to_keep tokens if no tokens satisfy this constraint. See [Truncation Sampling as Language Model + Desmoothing](https://arxiv.org/abs/2210.15191) for more information. + + Args: + epsilon (`float`): + If set to > 0, only the most tokens with probabilities `epsilon` or higher are kept for generation. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + + Examples: + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(1) + >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") + + >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3 | < 4 (left-hand pointer) ; + + + + >>> # With epsilon sampling, the output gets restricted to high-probability tokens. 
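`TypicalLogitsWarper` above ranks tokens by how close their surprisal `-log p` is to the entropy of the whole distribution, rather than by raw probability. A small framework-free sketch of that ranking, using a toy distribution:

```python
import math

probs = [0.5, 0.3, 0.15, 0.05]
entropy = -sum(p * math.log(p) for p in probs)

# distance between each token's surprisal and the distribution's entropy
distance = [abs(-math.log(p) - entropy) for p in probs]
ranked = sorted(range(len(probs)), key=lambda i: distance[i])
print(round(entropy, 3), ranked)  # 1.142 [1, 0, 2, 3] -> the most probable token is not ranked first
```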
Note that this is similar to + >>> # Top P sampling, which restricts tokens based on their cumulative probability. + >>> # Pro tip: The paper recomends using `epsilon_cutoff` values between 3e-4 and 9e-4 + >>> outputs = model.generate(**inputs, do_sample=True, epsilon_cutoff=0.1) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9 + ``` + """ + + def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + epsilon = float(epsilon) + if epsilon <= 0 or epsilon >= 1: + raise ValueError(f"`epsilon_cutoff` has to be a float > 0 and < 1, but is {epsilon}") + + min_tokens_to_keep = int(min_tokens_to_keep) + if min_tokens_to_keep < 1: + raise ValueError( + f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}" + ) + + self.epsilon = epsilon + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # Determine which indices to remove + probabilities = scores.softmax(dim=-1) + indices_to_remove = probabilities < self.epsilon + + # Keep the words with the 'min_tokens_to_keep'-highest probabilities + top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check + indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None]) + + scores_processed = scores.masked_fill(indices_to_remove, self.filter_value) + return scores_processed + + +class EtaLogitsWarper(LogitsWarper): + r""" + [`LogitsWarper`] that performs eta-sampling, a technique to filter out tokens with probabilities below a dynamic + cutoff value, `eta`, which is calculated based on a combination of the hyperparameter `epsilon` and the entropy of + the token probabilities, i.e. `eta := min(epsilon, sqrt(epsilon * e^-entropy(probabilities)))`. Takes the largest + min_tokens_to_keep tokens if no tokens satisfy this constraint. It addresses the issue of poor quality in long + samples of text generated by neural language models leading to more coherent and fluent text. See [Truncation + Sampling as Language Model Desmoothing](https://arxiv.org/abs/2210.15191) for more information. Note: `do_sample` + must be set to `True` for this `LogitsWarper` to work. + + + Args: + epsilon (`float`): + A float value in the range (0, 1). Hyperparameter used to calculate the dynamic cutoff value, `eta`. The + suggested values from the paper ranges from 3e-4 to 4e-3 depending on the size of the model. + filter_value (`float`, *optional*, defaults to -inf): + All values that are found to be below the dynamic cutoff value, `eta`, are set to this float value. This + parameter is useful when logits need to be modified for very low probability tokens that should be excluded + from generation entirely. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Specifies the minimum number of tokens that must be kept for generation, regardless of their probabilities. + For example, if `min_tokens_to_keep` is set to 1, at least one token will always be kept for generation, + even if all tokens have probabilities below the cutoff `eta`. 
+ + Examples: + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> set_seed(1) + >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") + + >>> inputs = tokenizer("A sequence: 1, 2", return_tensors="pt") + + >>> # With sampling, the output is unexpected -- sometimes too unexpected. + >>> outputs = model.generate(**inputs, do_sample=True) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3 | < 4 (left-hand pointer) ; + + + + >>> # With eta sampling, the output gets restricted to high-probability tokens. You can see it as a dynamic form of + >>> # epsilon sampling that adapts its cutoff probability based on the entropy (high entropy = lower cutoff). + >>> # Pro tip: The paper recomends using `eta_cutoff` values between 3e-4 to 4e-3 + >>> outputs = model.generate(**inputs, do_sample=True, eta_cutoff=0.1) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + A sequence: 1, 2, 3, 4, 5, 6, 7, 8, 9 + ``` + """ + + def __init__(self, epsilon: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + epsilon = float(epsilon) + if epsilon <= 0 or epsilon >= 1: + raise ValueError(f"`eta_cutoff` has to be a float > 0 and < 1, but is {epsilon}") + + min_tokens_to_keep = int(min_tokens_to_keep) + if min_tokens_to_keep < 1: + raise ValueError( + f"`min_tokens_to_keep` has to be a strictly positive integer, but is {min_tokens_to_keep}" + ) + + self.epsilon = torch.tensor(epsilon) + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # Calculate the adaptive cutoff + probabilities = scores.softmax(dim=-1) + entropy = torch.distributions.Categorical(logits=scores).entropy() + eta = torch.min(self.epsilon, torch.sqrt(self.epsilon) * torch.exp(-entropy))[..., None] + indices_to_remove = probabilities < eta + + # Keep the words with the 'min_tokens_to_keep'-highest probabilities + top_k = min(self.min_tokens_to_keep, scores.size(-1)) # Safety check + indices_to_remove = indices_to_remove & (scores < torch.topk(scores, top_k)[0][..., -1, None]) + + scores_processed = scores.masked_fill(indices_to_remove, self.filter_value) + return scores_processed + + +def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int): + """ + Assume ngram_size=2 and prev_input_ids=tensor([[40, 2883, 2712, 4346]]). The output of generated ngrams look like + this {(40,): [2883], (2883,): [2712], (2712,): [4346]}. + + Args: + ngram_size (`int`): + The number sequential tokens taken as a group which may only occur once before being banned. + prev_input_ids (`torch.Tensor`): + Generated token ids for the current hypothesis. + num_hypos (`int`): + The number of hypotheses for which n-grams need to be generated. + + Returns: + generated_ngrams (`dict`): + Dictionary of generated ngrams. 
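`EtaLogitsWarper` above computes its cutoff as `eta = min(epsilon, sqrt(epsilon) * exp(-entropy))`. A short worked example with an `epsilon` value from the range the docstring suggests:

```python
import math

epsilon = 3e-4
for entropy in (1.0, 3.0, 6.0):
    eta = min(epsilon, math.sqrt(epsilon) * math.exp(-entropy))
    print(entropy, f"{eta:.2e}")
# only at high entropy does the adaptive cutoff drop below the plain epsilon value
```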
+ """ + # Initialize an empty list of dictionaries, one for each hypothesis (index) in the range of num_hypos + generated_ngrams = [{} for _ in range(num_hypos)] + for idx in range(num_hypos): + gen_tokens = prev_input_ids[idx].tolist() + generated_ngram = generated_ngrams[idx] + # Loop through each n-gram of size ngram_size in the list of tokens (gen_tokens) + for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]): + prev_ngram_tuple = tuple(ngram[:-1]) + generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] + return generated_ngrams + + +def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len): + """ + Determines the banned tokens for the current hypothesis based on previously generated n-grams. + + Args: + banned_ngrams (`dict`): + A dictionary containing previously generated n-grams for each hypothesis. + prev_input_ids (`torch.Tensor`): + Generated token ids for the current hypothesis. + ngram_size (`int`): + The number sequential tokens taken as a group which may only occur once before being banned. + cur_len (`int`): + The current length of the token sequences for which the n-grams are being checked. + + Returns: + List of tokens that are banned. + """ + # Before decoding the next token, prevent decoding of ngrams that have already appeared + start_idx = cur_len + 1 - ngram_size + ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist()) + return banned_ngrams.get(ngram_idx, []) + + +def _calc_banned_ngram_tokens( + ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int +) -> List[Iterable[int]]: + """Copied from fairseq for no_repeat_ngram in beam_search""" + if cur_len + 1 < ngram_size: + # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet + return [[] for _ in range(num_hypos)] + generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos) + banned_tokens = [ + _get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len) + for hypo_idx in range(num_hypos) + ] + return banned_tokens + + +class NoRepeatNGramLogitsProcessor(LogitsProcessor): + r""" + N-grams are groups of "n" consecutive words, characters, or tokens taken from a sequence of text. Given the + sentence: "She runs fast", the bi-grams (n=2) would be ("she", "runs") and ("runs", "fast"). In text generation, + avoiding repetitions of word sequences provides a more diverse output. This [`LogitsProcessor`] enforces no + repetition of n-grams by setting the scores of banned tokens to negative infinity which eliminates those tokens + from consideration when further processing the scores. Note that, for decoder-only models like most LLMs, the + prompt is also considered to obtain the n-grams. + [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). + + + + Use n-gram penalties with care. For instance, penalizing 2-grams (bigrams) in an article about the city of New York + might lead to undesirable outcomes where the city's name appears only once in the entire text. + [Reference](https://huggingface.co/blog/how-to-generate) + + + + Args: + ngram_size (`int`): + All ngrams of size `ngram_size` can only occur once. 
+ + Examples: + + ```py + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2") + >>> inputs = tokenizer(["Today I"], return_tensors="pt") + + >>> output = model.generate(**inputs) + >>> print(tokenizer.decode(output[0], skip_special_tokens=True)) + Today I’m not sure if I’m going to be able to do it. + + >>> # Now let's add ngram size using `no_repeat_ngram_size`. This stops the repetitions ("I’m") in the output. + >>> output = model.generate(**inputs, no_repeat_ngram_size=2) + >>> print(tokenizer.decode(output[0], skip_special_tokens=True)) + Today I’m not sure if I can get a better understanding of the nature of this issue + ``` + """ + + def __init__(self, ngram_size: int): + if not isinstance(ngram_size, int) or ngram_size <= 0: + raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") + self.ngram_size = ngram_size + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + num_batch_hypotheses = scores.shape[0] + cur_len = input_ids.shape[-1] + scores_processed = scores.clone() + banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len) + for i, banned_tokens in enumerate(banned_batch_tokens): + scores_processed[i, banned_tokens] = -float("inf") + + return scores_processed + + +class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that works similarly to [`NoRepeatNGramLogitsProcessor`], but applied exclusively to prevent + the repetition of n-grams present in the prompt. + + It was designed to promote chattiness in a language model, by preventing the generation of n-grams present in + previous conversation rounds. + + Args: + encoder_ngram_size (`int`): + All ngrams of size `ngram_size` can only occur within the encoder input ids. + encoder_input_ids (`int`): + The encoder_input_ids that should not be repeated within the decoder ids. + + Examples: + + ```py + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m") + >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m") + + >>> inputs = tokenizer("Alice: I love cats. What do you love?\nBob:", return_tensors="pt") + + >>> # With greedy decoding, we see Bob repeating Alice's opinion. If Bob was a chatbot, it would be a poor one. + >>> outputs = model.generate(**inputs) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + Alice: I love cats. What do you love? + Bob: I love cats. What do you + + >>> # With this logits processor, we can prevent Bob from repeating Alice's opinion. + >>> outputs = model.generate(**inputs, encoder_no_repeat_ngram_size=2) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + Alice: I love cats. What do you love? + Bob: My cats are very cute. 
+ ``` + """ + + def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor): + if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0: + raise ValueError( + f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}" + ) + self.ngram_size = encoder_ngram_size + if len(encoder_input_ids.shape) == 1: + encoder_input_ids = encoder_input_ids.unsqueeze(0) + self.batch_size = encoder_input_ids.shape[0] + self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # B x num_beams + num_hypos = scores.shape[0] + num_beams = num_hypos // self.batch_size + cur_len = input_ids.shape[-1] + scores_processed = scores.clone() + banned_batch_tokens = [ + _get_generated_ngrams( + self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len + ) + for hypo_idx in range(num_hypos) + ] + + for i, banned_tokens in enumerate(banned_batch_tokens): + scores_processed[i, banned_tokens] = -float("inf") + + return scores_processed + + +class SequenceBiasLogitsProcessor(LogitsProcessor): + """ + [`LogitsProcessor`] that applies an additive bias on sequences. The bias is applied to the last token of a sequence + when the next generated token can complete it. Consequently, to take the most of biasing sequences with more than + one token, consider using beam methods (to gracefully work around partially completed sequences that have a + negative bias) and applying the bias to their prefixes (to ensure the bias is applied earlier). + + + + In order to get the token ids of the sequences that you want to bias, make sure to set `add_prefix_space=True` when + initializing the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The + `add_prefix_space` argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours + come from `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). + + + + Args: + sequence_bias (`Dict[Tuple[int], float]`): + Dictionary that maps a sequence of tokens to its bias term. Positive biases increase the odds of the + sequence being selected, while negative biases do the opposite. If a sequence has a length of 1, its bias + will always be applied. Otherwise, the bias will only be applied if the sequence in question is about to be + completed (in the token selection step after this processor is applied). + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> inputs = tokenizer(["The full name of Donald is Donald"], return_tensors="pt") + + >>> summary_ids = model.generate(inputs["input_ids"], max_new_tokens=4) + >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald J. Trump Jr + + >>> # Now let's control generation through a bias. Please note that the tokenizer is initialized differently! + >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("openai-community/gpt2", add_prefix_space=True) + + + >>> def get_tokens_as_tuple(word): + ... 
return tuple(tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0]) + + + >>> # If we add a negative bias without beam search, it may become "stuck" in a prefix without good continuations + >>> sequence_bias = {get_tokens_as_tuple("Trump"): -10.0} + >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, sequence_bias=sequence_bias) + >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald J. Donald, + + >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias) + >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald Rumsfeld, + + >>> # We can also add a positive bias to nudge the model towards specific tokens or continuations + >>> sequence_bias = {get_tokens_as_tuple("Donald Duck"): 10.0} + >>> biased_ids = model.generate(inputs["input_ids"], max_new_tokens=4, num_beams=4, sequence_bias=sequence_bias) + >>> print(tokenizer.batch_decode(biased_ids, skip_special_tokens=True)[0]) + The full name of Donald is Donald Duck. + ``` + """ + + def __init__(self, sequence_bias: Dict[Tuple[int], float]): + self.sequence_bias = sequence_bias + self._validate_arguments() + + # Bias variables that will be populated on the first call (for retrocompatibility purposes, the vocabulary size + # is infered in the first usage, which inhibits initializing here) + self.length_1_bias = None + self.prepared_bias_variables = False + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # 1 - Prepares the bias tensors. This is only needed the first time the logit processor is called. + if not self.prepared_bias_variables: + self._prepare_bias_variables(scores) + + # 2 - prepares an empty bias to add + bias = torch.zeros_like(scores) + + # 3 - include the bias from length = 1 + bias += self.length_1_bias + + # 4 - include the bias from length > 1, after determining which biased sequences may be completed. + for sequence_ids, sequence_bias in self.sequence_bias.items(): + if len(sequence_ids) == 1: # the sequence is of length 1, already applied + continue + if len(sequence_ids) > input_ids.shape[1]: # the sequence is longer than the context, ignore + continue + prefix_length = len(sequence_ids) - 1 + last_token = sequence_ids[-1] + matching_rows = torch.eq( + input_ids[:, -prefix_length:], + torch.tensor(sequence_ids[:-1], dtype=input_ids.dtype, device=input_ids.device), + ).prod(dim=1) + bias[:, last_token] += torch.where( + matching_rows.bool(), + torch.tensor(sequence_bias, device=input_ids.device), + torch.tensor(0.0, device=input_ids.device), + ) + + # 5 - apply the bias to the scores + scores_processed = scores + bias + return scores_processed + + def _prepare_bias_variables(self, scores: torch.FloatTensor): + vocabulary_size = scores.shape[-1] + + # Check biased tokens out of bounds + invalid_biases = [] + for sequence_ids in self.sequence_bias: + for token_id in sequence_ids: + if token_id >= vocabulary_size: + invalid_biases.append(token_id) + if len(invalid_biases) > 0: + raise ValueError( + f"The model vocabulary size is {vocabulary_size}, but the following tokens were being biased: " + f"{invalid_biases}" + ) + + # Precompute the bias tensors to be applied. Sequences of length 1 are kept separately, as they can be applied + # with simpler logic. 
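+        # Biases for sequences longer than one token cannot be precomputed here: whether they apply depends on the
+        # tokens generated so far, so they are re-checked against the running `input_ids` on every `__call__`.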
+ self.length_1_bias = torch.zeros((vocabulary_size,), dtype=torch.float).to(scores.device) + for sequence_ids, bias in self.sequence_bias.items(): + if len(sequence_ids) == 1: + self.length_1_bias[sequence_ids[-1]] = bias + + self.prepared_bias_variables = True + + def _validate_arguments(self): + sequence_bias = self.sequence_bias + if not isinstance(sequence_bias, dict) or len(sequence_bias) == 0: + raise ValueError(f"`sequence_bias` has to be a non-empty dictionary, but is {sequence_bias}.") + if any(not isinstance(sequence_ids, tuple) for sequence_ids in sequence_bias.keys()): + raise ValueError(f"`sequence_bias` has to be a dict with tuples as keys, but is {sequence_bias}.") + if any( + any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in sequence_ids) + or len(sequence_ids) == 0 + for sequence_ids in sequence_bias.keys() + ): + raise ValueError( + f"Each key in `sequence_bias` has to be a non-empty tuple of positive integers, but is " + f"{sequence_bias}." + ) + if any(not isinstance(bias, float) for bias in sequence_bias.values()): + raise ValueError(f"`sequence_bias` has to be a dict with floats as values, but is {sequence_bias}.") + + +class NoBadWordsLogitsProcessor(SequenceBiasLogitsProcessor): + """ + [`LogitsProcessor`] that enforces that specified sequences will never be selected. + + + + In order to get the token ids of the words that should not appear in the generated text, make sure to set + `add_prefix_space=True` when initializing the tokenizer, and use `tokenizer(bad_words, + add_special_tokens=False).input_ids`. The `add_prefix_space` argument is only supported for some slow tokenizers, + as fast tokenizers' prefixing behaviours come from `pre tokenizers`. Read more + [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). + + + + Args: + bad_words_ids (`List[List[int]]`): + List of list of token ids that are not allowed to be generated. + eos_token_id (`Union[int, List[int]]`): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> inputs = tokenizer(["In a word, the cake is a"], return_tensors="pt") + + >>> output_ids = model.generate(inputs["input_ids"], max_new_tokens=5, pad_token_id=tokenizer.eos_token_id) + >>> print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]) + In a word, the cake is a bit of a mess. + + >>> # Now let's take the bad words out. Please note that the tokenizer is initialized differently + >>> tokenizer_with_prefix_space = AutoTokenizer.from_pretrained("openai-community/gpt2", add_prefix_space=True) + + + >>> def get_tokens_as_list(word_list): + ... "Converts a sequence of words into a list of tokens" + ... tokens_list = [] + ... for word in word_list: + ... tokenized_word = tokenizer_with_prefix_space([word], add_special_tokens=False).input_ids[0] + ... tokens_list.append(tokenized_word) + ... return tokens_list + + + >>> bad_words_ids = get_tokens_as_list(word_list=["mess"]) + >>> output_ids = model.generate( + ... inputs["input_ids"], max_new_tokens=5, bad_words_ids=bad_words_ids, pad_token_id=tokenizer.eos_token_id + ... ) + >>> print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0]) + In a word, the cake is a bit of a surprise. 
+    ```
+    """
+
+    def __init__(self, bad_words_ids: List[List[int]], eos_token_id: Union[int, List[int]]):
+        self.bad_word_ids = bad_words_ids
+        self._validate_arguments()
+
+        # Filter EOS token from bad_words_ids
+        if eos_token_id is None:
+            eos_token_id = []
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        bad_words_ids = list(
+            filter(lambda bad_token_seq: all(bad_token_seq != [i] for i in eos_token_id), bad_words_ids)
+        )
+
+        # Forbidding a sequence is equivalent to setting its bias to -inf
+        sequence_bias = {tuple(sequence): float("-inf") for sequence in bad_words_ids}
+        super().__init__(sequence_bias=sequence_bias)
+
+    def _validate_arguments(self):
+        bad_words_ids = self.bad_word_ids
+        if not isinstance(bad_words_ids, list) or len(bad_words_ids) == 0:
+            raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.")
+        if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids):
+            raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.")
+        if any(
+            any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids)
+            for bad_word_ids in bad_words_ids
+        ):
+            raise ValueError(
+                f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
+            )
+
+
+class PrefixConstrainedLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that enforces constrained generation and is useful for prefix-conditioned constrained
+    generation. See [Autoregressive Entity Retrieval](https://arxiv.org/abs/2010.00904) for more information.
+
+    Args:
+        prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`):
+            This function constrains the beam search to allowed tokens only at each step. This function takes 2
+            arguments `input_ids` and the batch ID `batch_id`. It has to return a list with the allowed tokens for the
+            next generation step conditioned on the previously generated tokens `input_ids` and the batch ID
+            `batch_id`.
+
+    Examples:
+
+    ```py
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    >>> model = AutoModelForCausalLM.from_pretrained("bigscience/bloomz-560m")
+    >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/bloomz-560m")
+
+    >>> inputs = tokenizer("Alice and Bob", return_tensors="pt")
+
+    >>> # By default, it continues generating according to the model's logits
+    >>> outputs = model.generate(**inputs, max_new_tokens=5)
+    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
+    Alice and Bob are friends
+
+    >>> # We can constrain it with `prefix_allowed_tokens_fn` to force a certain behavior based on a prefix.
+    >>> # For instance, we can force an entire entity to be generated when its beginning is detected.
+    >>> entity = tokenizer(" Bob Marley", return_tensors="pt").input_ids[0]  # 3 tokens
+    >>> def prefix_allowed_tokens_fn(batch_id, input_ids):
+    ...     '''
+    ...     Attempts to generate 'Bob Marley' when 'Bob' is detected.
+    ...     In this case, `batch_id` is not used, but you can set rules for each batch member.
+    ...     '''
+    ...     if input_ids[-1] == entity[0]:
+    ...         return [entity[1].item()]
+    ...     elif input_ids[-2] == entity[0] and input_ids[-1] == entity[1]:
+    ...         return [entity[2].item()]
+    ...
return list(range(tokenizer.vocab_size)) # If no match, allow all tokens + + >>> outputs = model.generate(**inputs, max_new_tokens=5, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn) + >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]) + Alice and Bob Marley + ``` + """ + + def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int): + self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn + self._num_beams = num_beams + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + mask = torch.full_like(scores, -math.inf) + for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])): + for beam_id, sent in enumerate(beam_sent): + prefix_allowed_tokens = self._prefix_allowed_tokens_fn(batch_id, sent) + if len(prefix_allowed_tokens) == 0: + raise ValueError( + f"`prefix_allowed_tokens_fn` returned an empty list for batch ID {batch_id}." + f"This means that the constraint is unsatisfiable. Please check your implementation" + f"of `prefix_allowed_tokens_fn` " + ) + mask[batch_id * self._num_beams + beam_id, prefix_allowed_tokens] = 0 + + scores_processed = scores + mask + return scores_processed + + +class HammingDiversityLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that enforces diverse beam search. + + Note that this logits processor is only effective for [`PreTrainedModel.group_beam_search`]. See [Diverse Beam + Search: Decoding Diverse Solutions from Neural Sequence Models](https://arxiv.org/pdf/1610.02424.pdf) for more + details. + + Traditional beam search often generates very similar sequences across different beams. + `HammingDiversityLogitsProcessor` addresses this by penalizing beams that generate tokens already chosen by other + beams in the same time step. + + Args: + diversity_penalty (`float`): + This value is subtracted from a beam's score if it generates a token same as any beam from other group at a + particular time. A higher `diversity_penalty` will enforce greater diversity among the beams. Adjusting + this value can help strike a balance between diversity and natural likelihood. + num_beams (`int`): + Number of beams for beam search. 1 means no beam search. + num_beam_groups (`int`): + Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. + [this paper](https://arxiv.org/pdf/1610.02424.pdf) for more details. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM + >>> import torch + + >>> # Initialize the model and tokenizer + >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") + + >>> # A long text about the solar system + >>> text = ( + ... "The Solar System is a gravitationally bound system comprising the Sun and the objects that orbit it, " + ... "either directly or indirectly. Of the objects that orbit the Sun directly, the largest are the eight " + ... "planets, with the remainder being smaller objects, such as the five dwarf planets and small Solar System " + ... "bodies. The Solar System formed 4.6 billion years ago from the gravitational collapse of a giant " + ... "interstellar molecular cloud." + ... ) + >>> inputs = tokenizer("summarize: " + text, return_tensors="pt") + + >>> # Generate diverse summary + >>> outputs_diverse = model.generate( + ... **inputs, + ... 
num_beam_groups=2, + ... diversity_penalty=10.0, + ... max_length=100, + ... num_beams=4, + ... num_return_sequences=2, + ... ) + >>> summaries_diverse = tokenizer.batch_decode(outputs_diverse, skip_special_tokens=True) + + >>> # Generate non-diverse summary + >>> outputs_non_diverse = model.generate( + ... **inputs, + ... max_length=100, + ... num_beams=4, + ... num_return_sequences=2, + ... ) + >>> summary_non_diverse = tokenizer.batch_decode(outputs_non_diverse, skip_special_tokens=True) + + >>> # With `diversity_penalty`, the resulting beams are much more diverse + >>> print(summary_non_diverse) + ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.', + 'the Solar System formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.'] + + >>> print(summaries_diverse) + ['the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets.', + 'the solar system formed 4.6 billion years ago from the collapse of a giant interstellar molecular cloud. of the objects that orbit the Sun directly, the largest are the eight planets. the rest of the objects are smaller objects, such as the five dwarf planets and small solar system bodies.'] + ``` + """ + + def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int): + if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0): + raise ValueError("`diversity_penalty` should be a float strictly larger than 0.") + self._diversity_penalty = diversity_penalty + if not isinstance(num_beams, int) or num_beams < 2: + raise ValueError("`num_beams` should be an integer strictly larger than 1.") + self._num_beams = num_beams + if not isinstance(num_beam_groups, int) or num_beam_groups < 2: + raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.") + if num_beam_groups > num_beams: + raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.") + self._num_sub_beams = num_beams // num_beam_groups + + def __call__( + self, + input_ids: torch.LongTensor, + scores: torch.FloatTensor, + current_tokens: torch.LongTensor, + beam_group_idx: int, + ) -> torch.FloatTensor: + r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids) + scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. These can be logits for each vocabulary when not using + beam search or log softmax for each vocabulary token when using beam search + current_tokens (`torch.LongTensor` of shape `(batch_size)`): + Indices of input sequence tokens in the vocabulary, corresponding to the tokens selected by the other + beam groups in the current generation step. + beam_group_idx (`int`): + The index of the beam group currently being processed. + + Return: + `torch.FloatTensor` of shape `(batch_size, config.vocab_size)`: + The processed prediction scores. 
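+
+        As a toy illustration of the penalty (illustrative values only): the tokens already selected by earlier
+        groups at this step are counted with `torch.bincount`, and every beam in the current group has
+        `diversity_penalty * count` subtracted from the corresponding token scores.
+
+        ```python
+        >>> import torch
+
+        >>> previous_group_tokens = torch.tensor([42, 42, 7])  # picks made by earlier groups at this step
+        >>> token_frequency = torch.bincount(previous_group_tokens, minlength=50)
+        >>> print(float(token_frequency[42]), float(token_frequency[7]))
+        2.0 1.0
+        ```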
+ """ + # hamming diversity: penalise using same token in current group which was used in previous groups at + # the same time step + batch_size = current_tokens.shape[0] // self._num_beams + group_start_idx = beam_group_idx * self._num_sub_beams + group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams) + group_size = group_end_idx - group_start_idx + vocab_size = scores.shape[-1] + + if group_start_idx == 0: + return scores + + scores_processed = scores.clone() + for batch_idx in range(batch_size): + # predicted tokens of last time step of previous groups + previous_group_tokens = current_tokens[ + batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx + ] + token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device) + scores_processed[batch_idx * group_size : (batch_idx + 1) * group_size] -= ( + self._diversity_penalty * token_frequency + ) + + return scores_processed + + +class ForcedBOSTokenLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that enforces the specified token as the first generated token. Used with encoder-decoder + models. + + Args: + bos_token_id (`int`): + The id of the token to force as the first generated token. + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM + + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google/flan-t5-small") + >>> tokenizer = AutoTokenizer.from_pretrained("google/flan-t5-small") + + >>> inputs = tokenizer("Translate from English to German: I love cats.", return_tensors="pt") + + >>> # By default, it continues generating according to the model's logits + >>> outputs = model.generate(**inputs, max_new_tokens=10) + >>> print(tokenizer.batch_decode(outputs)[0]) + Ich liebe Kitty. + + >>> # We can use `forced_bos_token_id` to force the start of generation with an encoder-decoder model + >>> # (including forcing it to end straight away with an EOS token) + >>> outputs = model.generate(**inputs, max_new_tokens=10, forced_bos_token_id=tokenizer.eos_token_id) + >>> print(tokenizer.batch_decode(outputs)[0]) + + ``` + """ + + def __init__(self, bos_token_id: int): + self.bos_token_id = bos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + cur_len = input_ids.shape[-1] + scores_processed = scores + if cur_len == 1: + scores_processed = torch.full_like(scores, -math.inf) + scores_processed[:, self.bos_token_id] = 0 + return scores_processed + + +class ForcedEOSTokenLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. + + Args: + max_length (`int`): + The maximum length of the sequence to be generated. + eos_token_id (`Union[int, List[int]]`): + The id of the token to force as the last generated token when `max_length` is reached. Optionally, use a + list to set multiple *end-of-sequence* tokens. 
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+
+    >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+    >>> inputs = tokenizer("A sequence: 1, 2, 3", return_tensors="pt")
+
+    >>> # By default, it continues generating according to the model's logits
+    >>> outputs = model.generate(**inputs, max_new_tokens=10)
+    >>> print(tokenizer.batch_decode(outputs)[0])
+    A sequence: 1, 2, 3, 4, 5, 6, 7, 8
+
+    >>> # `forced_eos_token_id` ensures the generation ends with an EOS token
+    >>> outputs = model.generate(**inputs, max_new_tokens=10, forced_eos_token_id=tokenizer.eos_token_id)
+    >>> print(tokenizer.batch_decode(outputs)[0])
+    A sequence: 1, 2, 3, 4, 5, 6, 7,<|endoftext|>
+    ```
+    """
+
+    def __init__(self, max_length: int, eos_token_id: Union[int, List[int]]):
+        self.max_length = max_length
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        self.eos_token_id = eos_token_id
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        cur_len = input_ids.shape[-1]
+        scores_processed = scores
+        if cur_len == self.max_length - 1:
+            scores_processed = torch.full_like(scores, -math.inf)
+            scores_processed[:, self.eos_token_id] = 0
+        return scores_processed
+
+
+class InfNanRemoveLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that removes all `nan` and `inf` values to prevent the generation method from failing. Note
+    that this logits processor should only be used if necessary, since it can slow down the generation method.
+
+    This logits processor has no `generate` example, as there shouldn't be a correct combination of flags that warrants
+    its use.
+    """
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        # set all nan values to 0.0
+        scores_processed = torch.where(scores != scores, 0.0, scores)
+
+        # set all +/-inf values to max/min possible value
+        scores_processed = torch.where(scores == float("inf"), torch.finfo(scores.dtype).max, scores_processed)
+        scores_processed = torch.where(scores == -float("inf"), torch.finfo(scores.dtype).min, scores_processed)
+
+        return scores_processed
+
+
+class ExponentialDecayLengthPenalty(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] that exponentially increases the score of the `eos_token_id` after `start_index` has been
+    reached. This allows generating shorter sequences without having a hard cutoff, allowing the `eos_token` to be
+    predicted in a meaningful position.
+
+    Args:
+        exponential_decay_length_penalty (`tuple(int, float)`):
+            This tuple consists of `(start_index, decay_factor)`, where `start_index` indicates where the penalty
+            starts and `decay_factor` represents the factor of exponential decay
+        eos_token_id (`Union[int, List[int]]`):
+            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+        input_ids_seq_length (`int`):
+            The length of the input sequence.
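+
+    As a rough sketch of the arithmetic (see `__call__` below for the exact code): once the current length passes the
+    regulation start, the EOS logit is raised by `abs(eos_logit) * (decay_factor**steps_past_start - 1)`, so the boost
+    grows quickly for decay factors noticeably above 1. With the illustrative factor 1.6:
+
+    ```python
+    >>> decay_factor = 1.6
+    >>> for steps_past_start in (1, 3, 5):
+    ...     print(steps_past_start, round(decay_factor**steps_past_start - 1, 2))
+    1 0.6
+    3 3.1
+    5 9.49
+    ```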
+ + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM, set_seed + + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + + >>> text = "Just wanted to let you know, I" + >>> inputs = tokenizer(text, return_tensors="pt") + + >>> # Let's consider that we want short sentences, so we limit `max_length=30`. However, we observe that the answer + >>> # tends to end abruptly. + >>> set_seed(1) + >>> outputs = model.generate(**inputs, do_sample=True, temperature=0.9, max_length=30, pad_token_id=50256) + >>> print(tokenizer.batch_decode(outputs)[0]) + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was + published in 2010. Although + + >>> # To promote the appearance of the EOS token at the right time, we add the `exponential_decay_length_penalty = + >>> # (start_index, decay_factor)`. Instead of cutting at max_tokens, the output comes to an end before and usually + >>> # with more meaning. What happens is that starting from `start_index` the EOS token score will be increased + >>> # by `decay_factor` exponentially. However, if you set a high decay factor, you may also end up with abruptly + >>> # ending sequences. + >>> set_seed(1) + >>> outputs = model.generate( + ... **inputs, + ... do_sample=True, + ... temperature=0.9, + ... max_length=30, + ... pad_token_id=50256, + ... exponential_decay_length_penalty=(15, 1.6), + ... ) + >>> print(tokenizer.batch_decode(outputs)[0]) + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network + which<|endoftext|> + + >>> # With a small decay factor, you will have a higher chance of getting a meaningful sequence. + >>> set_seed(1) + >>> outputs = model.generate( + ... **inputs, + ... do_sample=True, + ... temperature=0.9, + ... max_length=30, + ... pad_token_id=50256, + ... exponential_decay_length_penalty=(15, 1.01), + ... ) + >>> print(tokenizer.batch_decode(outputs)[0]) + Just wanted to let you know, I received a link to an ebook, the book How To Start A Social Network which was + published in 2010.<|endoftext|> + ``` + """ + + def __init__( + self, + exponential_decay_length_penalty: Tuple[int, float], + eos_token_id: Union[int, List[int]], + input_ids_seq_length: int, + ): + self.regulation_start = exponential_decay_length_penalty[0] + input_ids_seq_length + self.regulation_factor = exponential_decay_length_penalty[1] + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + self.eos_token_id = eos_token_id + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + cur_len = input_ids.shape[-1] + penalties = torch.zeros_like(scores) + scores_processed = scores + if cur_len > self.regulation_start: + for i in self.eos_token_id: + penalty_idx = cur_len - self.regulation_start + # To support negative logits we compute the penalty of the absolute value and add to the original logit + penalty = torch.abs(scores[:, i]) * (pow(self.regulation_factor, penalty_idx) - 1) + penalties[:, i] = penalty + scores_processed = scores + penalties + return scores_processed + + +class LogitNormalization(LogitsProcessor, LogitsWarper): + r""" + [`LogitsWarper`] and [`LogitsProcessor`] for normalizing the scores using log-softmax. 
It's important to normalize
+    the scores during beam search, after applying the logits processors or warpers, since the search algorithm used in
+    this library doesn't do it (it only normalizes before the processors are applied, so the scores may need
+    re-normalization afterwards) but it still assumes that the scores are normalized when comparing the hypotheses.
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoTokenizer, AutoModelForCausalLM
+    >>> import torch
+
+    >>> model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2")
+    >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilgpt2")
+
+    >>> inputs = tokenizer("A sequence: 1, 2, 3", return_tensors="pt")
+
+    >>> # By default, the scores are not normalized -- the sum of their exponentials is NOT a normalized probability
+    >>> # distribution, summing to 1
+    >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+    >>> print(torch.allclose(torch.sum(torch.exp(outputs.scores[-1])), torch.Tensor((1.000,)), rtol=1e-4))
+    False
+
+    >>> # Normalizing them may have a positive impact on beam methods, or when using the scores on your application
+    >>> outputs = model.generate(**inputs, renormalize_logits=True, return_dict_in_generate=True, output_scores=True)
+    >>> print(torch.allclose(torch.sum(torch.exp(outputs.scores[-1])), torch.Tensor((1.000,)), rtol=1e-4))
+    True
+    ```
+    """
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        scores_processed = scores.log_softmax(dim=-1)
+        return scores_processed
+
+
+class SuppressTokensAtBeginLogitsProcessor(LogitsProcessor):
+    r"""
+    [`SuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts
+    generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` are
+    not generated at the beginning. Originally created for
+    [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper).
+
+    Examples:
+
+    ```python
+    >>> from transformers import AutoProcessor, WhisperForConditionalGeneration
+    >>> from datasets import load_dataset
+
+    >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en")
+    >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en")
+    >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+    >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt")
+
+    >>> # Whisper has `begin_suppress_tokens` set by default (= `[220, 50256]`). 50256 is the EOS token, so this means
+    >>> # it can't generate an EOS token in the first iteration, but it can in the others.
+    >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
+    >>> print(outputs.scores[0][0, 50256])
+    tensor(-inf)
+    >>> print(outputs.scores[-1][0, 50256])  # in other places we can see some probability mass for EOS
+    tensor(29.9010)
+
+    >>> # If we disable `begin_suppress_tokens`, we can generate EOS in the first iteration.
+    >>> outputs = model.generate(
+    ...     **inputs, return_dict_in_generate=True, output_scores=True, begin_suppress_tokens=None
+    ...
) + >>> print(outputs.scores[0][0, 50256]) + tensor(11.2027) + ``` + """ + + def __init__(self, begin_suppress_tokens, begin_index): + self.begin_suppress_tokens = list(begin_suppress_tokens) + self.begin_index = begin_index + + def set_begin_index(self, begin_index): + self.begin_index = begin_index + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + vocab_tensor = torch.arange(scores.shape[-1], device=scores.device) + begin_suppress_tokens = torch.tensor(self.begin_suppress_tokens, device=scores.device) + suppress_token_mask = torch.isin(vocab_tensor, begin_suppress_tokens) + scores_processed = scores + if input_ids.shape[-1] == self.begin_index: + scores_processed = torch.where(suppress_token_mask, -float("inf"), scores) + + return scores_processed + + +class SuppressTokensLogitsProcessor(LogitsProcessor): + r""" + This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so + that they are not generated. Originally created for + [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper). + + Examples: + + ```python + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = processor(ds[0]["audio"]["array"], return_tensors="pt") + + >>> # Whisper has a long list of suppressed tokens. For instance, in this case, the token 1 is suppressed by default. + >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True) + >>> print(outputs.scores[1][0, 1]) # 1 (and not 0) is the first freely generated token + tensor(-inf) + + >>> # If we disable `suppress_tokens`, we can generate it. + >>> outputs = model.generate(**inputs, return_dict_in_generate=True, output_scores=True, suppress_tokens=None) + >>> print(outputs.scores[1][0, 1]) + tensor(6.0678) + ``` + """ + + def __init__(self, suppress_tokens): + self.suppress_tokens = list(suppress_tokens) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + vocab_tensor = torch.arange(scores.shape[-1], device=scores.device) + suppress_tokens = torch.tensor(self.suppress_tokens, device=scores.device) + suppress_token_mask = torch.isin(vocab_tensor, suppress_tokens) + scores = torch.where(suppress_token_mask, -float("inf"), scores) + return scores + + +class ForceTokensLogitsProcessor(LogitsProcessor): + r""" + This processor takes a list of pairs of integers which indicates a mapping from generation indices to token + indices that will be forced before generation. The processor will set their log probs to `inf` so that they are + sampled at their corresponding index. Originally created for + [Whisper](https://huggingface.co/docs/transformers/model_doc/whisper). + """ + + def __init__(self, force_token_map: List[List[int]], _has_warned: Optional[bool] = False): + self.force_token_map = dict(force_token_map) + if not _has_warned: + # TODO(Sanchit): remove this processor entirely in v4.40 + warnings.warn( + "This `ForceTokensLogitsProcessor` has been deprecated and will be removed in v4.40. 
Should you need to provide prompt ids for generation, specify `input_ids` to the generate method for decoder-only models, or `decoder_input_ids` for encoder-decoder models.", + FutureWarning, + ) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + generation_idx = input_ids.shape[-1] + current_token = self.force_token_map.get(generation_idx, None) + scores_processed = scores + if current_token is not None: + scores_processed = torch.full_like(scores, -float("inf")) + scores_processed[:, current_token] = 0 + return scores_processed + + +class WhisperTimeStampLogitsProcessor(LogitsProcessor): + r""" + + [`LogitsProcessor`] that modifies the logits for the generation of timestamps in the transcription. When the input + tokens are at a specific threshold, the processor sets the scores to negative infinity. The processor makes sure + that timestamp tokens appear in pairs, by masking out the logits that would break this pairing pattern. This is + done to maintain the consistency and structure of generated timestamps. It also ensures that when the predicted + probability of sampling any of the timestamp token is greater than any individual non-timestamp token, those + non-timestamp logits are set to negative infinity. This is done to ensure the generation of timestamps over other + potential tokens. + + + See [the paper](https://arxiv.org/abs/2212.04356) for more information. + + Args: + generate_config (`GenerateConfig`): + The generate config used to generate the output. The following parameters are required: + eos_token_id (`int`, *optional*, defaults to 50257): + The id of the *end-of-sequence* token. + no_timestamps_token_id (`int`, *optional*, defaults to 50363): + The id of the `"<|notimestamps|>"` token. + max_initial_timestamp_index (`int`, *optional*, defaults to 1): + Used to set the maximum value of the initial timestamp. This is used to prevent the model from + predicting timestamps that are too far in the future. + begin_index (`Optional`, *optional*): Token index of the first token that is generated by the model. + _detect_timestamp_from_logprob (`bool`, *optional*): Whether timestamps can be predicted from logprobs over all timestamps. 
+ + Examples: + ``` python + >>> import torch + >>> from transformers import AutoProcessor, WhisperForConditionalGeneration, GenerationConfig + >>> from datasets import load_dataset + + >>> processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") + >>> model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") + >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") + >>> inputs = processor(ds[3]["audio"]["array"], return_tensors="pt") + >>> input_features = inputs.input_features + + >>> #Displaying timestamps + >>> generated_ids = model.generate(inputs=input_features, return_timestamps=True) + >>> transcription = processor.batch_decode(generated_ids, decode_with_timestamps=True)[0] + >>> print("Transcription:", transcription) + Transcription: <|startoftranscript|><|0.00|> He has grave doubts whether Sir Frederick Layton's work is really Greek after all, and can<|6.44|><|6.44|> discover in it but little of rocky Ithaca.<|9.44|><|endoftext|> + + + >>> #No timestamps & change EOS: + >>> #This allows the user to select a specific token to terminate the sequence on, in this case it's the word "can"(460) + >>> model.generation_config.eos_token_id = 460 + >>> generated_ids = model.generate(inputs=input_features,return_timestamps=False) + >>> transcription = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] + >>> print("Transcription:", transcription) + Transcription: He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can + ``` + """ + + def __init__( + self, generate_config, begin_index: Optional[int] = None, _detect_timestamp_from_logprob: Optional[bool] = None + ): # support for the kwargs + self.no_timestamps_token_id = generate_config.no_timestamps_token_id + self.timestamp_begin = generate_config.no_timestamps_token_id + 1 + self.eos_token_id = generate_config.eos_token_id or generate_config.bos_token_id + + # this variable is mostly just used for testing + self._detect_timestamp_from_logprob = ( + _detect_timestamp_from_logprob + if _detect_timestamp_from_logprob is not None + else getattr(generate_config, "_detect_timestamp_from_logprob", True) + ) + + num_forced_ids = ( + len(generate_config.forced_decoder_ids) if generate_config.forced_decoder_ids is not None else 0 + ) + self.begin_index = begin_index or (num_forced_ids + 1) + + self.max_initial_timestamp_index = getattr(generate_config, "max_initial_timestamp_index", None) + # TODO(Patrick): Make sure that official models have max_initial_timestamp_index set to 50 + # self.max_initial_timestamp_index = 50 + + def set_begin_index(self, begin_index): + self.begin_index = begin_index + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # suppress <|notimestamps|> which is handled by without_timestamps + scores_processed = scores.clone() + scores_processed[:, self.no_timestamps_token_id] = -float("inf") + + # timestamps have to appear in pairs, except directly before eos_token; mask logits accordingly + for k in range(input_ids.shape[0]): + sampled_tokens = input_ids[k, self.begin_index :] + seq = list(sampled_tokens.tolist()) + + last_was_timestamp = len(seq) >= 1 and seq[-1] >= self.timestamp_begin + penultimate_was_timestamp = len(seq) < 2 or seq[-2] >= self.timestamp_begin + + if last_was_timestamp: + if penultimate_was_timestamp: # has to be non-timestamp + scores_processed[k, self.timestamp_begin :] = 
-float("inf")
+                else:  # cannot be normal text tokens
+                    scores_processed[k, : self.eos_token_id] = -float("inf")
+
+            timestamps = sampled_tokens[sampled_tokens.ge(self.timestamp_begin)]
+            if timestamps.numel() > 0:
+                # `timestamps` shouldn't decrease; forbid timestamp tokens smaller than the last
+                # The following lines of code are copied from: https://github.com/openai/whisper/pull/914/files#r1137085090
+                if last_was_timestamp and not penultimate_was_timestamp:
+                    timestamp_last = timestamps[-1]
+                else:
+                    # Avoid emitting <|0.00|> again
+                    timestamp_last = timestamps[-1] + 1
+
+                scores_processed[k, self.timestamp_begin : timestamp_last] = -float("inf")
+
+        # apply the `max_initial_timestamp` option
+        if input_ids.shape[1] == self.begin_index:
+            scores_processed[:, : self.timestamp_begin] = -float("inf")
+
+            if self.max_initial_timestamp_index is not None:
+                last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
+                scores_processed[:, last_allowed + 1 :] = -float("inf")
+
+        # if sum of probability over timestamps is above any other token, sample timestamp
+        logprobs = torch.nn.functional.log_softmax(scores_processed.float(), dim=-1)
+        for k in range(input_ids.shape[0]):
+            timestamp_logprob = logprobs[k, self.timestamp_begin :].logsumexp(dim=-1)
+            max_text_token_logprob = logprobs[k, : self.timestamp_begin].max()
+            if timestamp_logprob > max_text_token_logprob and self._detect_timestamp_from_logprob:
+                scores_processed[k, : self.timestamp_begin] = -float("inf")
+
+        return scores_processed
+
+
+class WhisperNoSpeechDetection(LogitsProcessor):
+    r"""This processor can be used to detect silence when using Whisper. It should take as input unprocessed logits to follow the original implementation."""
+
+    def __init__(self, no_speech_token: int, begin_index: int, scores_is_logprobs: bool = False):
+        self.no_speech_token = no_speech_token
+        # offset between the <|startoftranscript|> token (the start-of-transcription token in the paper) and the first generated token
+        # is equal to the position of the first generated token index
+        self.start_of_trans_offset = begin_index
+
+        # `self.begin_index` is a running value that is changed on the fly
+        self.begin_index = begin_index
+        self._no_speech_prob = [0.0]
+        self.is_scores_logprobs = scores_is_logprobs
+
+        # overwritten dynamically
+        self.model = None
+        self.inputs = None
+
+    def set_model(self, model):
+        self.model = model
+
+    def set_inputs(self, inputs):
+        self.inputs = {**self.model.prepare_inputs_for_generation(**inputs), **inputs}
+        self.inputs["input_features"] = self.inputs.pop("inputs")
+
+    @property
+    def no_speech_prob(self):
+        return self._no_speech_prob
+
+    def set_begin_index(self, begin_index):
+        self.begin_index = begin_index
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        is_scores_logprobs = self.is_scores_logprobs
+
+        if input_ids.shape[1] == self.begin_index:
+            if self.start_of_trans_offset > 1:
+                with torch.no_grad():
+                    logits = self.model(**self.inputs).logits
+
+                no_speech_index = self.begin_index - self.start_of_trans_offset
+                no_speech_scores = logits[:, no_speech_index]
+                is_scores_logprobs = False
+            else:
+                no_speech_scores = scores
+
+            if is_scores_logprobs:
+                probs = no_speech_scores.exp()
+            else:
+                probs = no_speech_scores.float().softmax(dim=-1)
+
+            self._no_speech_prob = probs[:, self.no_speech_token]
+
+        return scores
+
+
+class ClassifierFreeGuidanceLogitsProcessor(LogitsProcessor):
+    r"""
+    [`LogitsProcessor`] for classifier free guidance (CFG).
The scores are split over the batch dimension, + where the first half correspond to the conditional logits (predicted from the input prompt) and the second half + correspond to the unconditional logits (predicted from an empty or 'null' prompt). The processor computes a + weighted average across the conditional and unconditional logits, parameterised by the `guidance_scale`. + + See [the paper](https://arxiv.org/abs/2306.05284) for more information. + + + + This logits processor is exclusively compatible with + [MusicGen](https://huggingface.co/docs/transformers/main/en/model_doc/musicgen) + + + + Args: + guidance_scale (float): + The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale > 1`. + Higher guidance scale encourages the model to generate samples that are more closely linked to the input + prompt, usually at the expense of poorer quality. + + Examples: + + ```python + >>> from transformers import AutoProcessor, MusicgenForConditionalGeneration + + >>> processor = AutoProcessor.from_pretrained("facebook/musicgen-small") + >>> model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small") + + >>> inputs = processor( + ... text=["80s pop track with bassy drums and synth", "90s rock song with loud guitars and heavy drums"], + ... padding=True, + ... return_tensors="pt", + ... ) + >>> audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3, max_new_tokens=256) + ``` + """ + + def __init__(self, guidance_scale): + if guidance_scale > 1: + self.guidance_scale = guidance_scale + else: + raise ValueError( + "Require guidance scale >1 to use the classifier free guidance processor, got guidance scale " + f"{guidance_scale}." + ) + + @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + # simple check to make sure we have compatible batch sizes between our + # logits scores (cond + uncond) and input ids (cond only) + if scores.shape[0] != 2 * input_ids.shape[0]: + raise ValueError( + f"Logits should have twice the batch size of the input ids, the first half of batches corresponding to " + f"the conditional inputs, and the second half of batches corresponding to the unconditional inputs. Got " + f"batch size {scores.shape[0]} for the logits and {input_ids.shape[0]} for the input ids." + ) + unguided_bsz = scores.shape[0] // 2 + cond_logits, uncond_logits = scores.split(unguided_bsz, dim=0) + scores_processed = uncond_logits + (cond_logits - uncond_logits) * self.guidance_scale + return scores_processed + + +class AlternatingCodebooksLogitsProcessor(LogitsProcessor): + r""" + [`LogitsProcessor`] enforcing alternated generation between the two codebooks of Bark. + + + + This logits processor is exclusively compatible with + [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark)'s fine submodel. See the model documentation + for examples. + + + + Args: + input_start_len (`int`): + The length of the initial input sequence. + semantic_vocab_size (`int`): + Vocabulary size of the semantic part, i.e number of tokens associated to the semantic vocabulary. + codebook_size (`int`): + Number of tokens associated to the codebook. 
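+
+    Examples:
+
+    A minimal sketch of the alternating mask with toy sizes (the values below are illustrative and far smaller than
+    Bark's real vocabularies; in practice the processor is used through Bark's fine submodel):
+
+    ```python
+    >>> import torch
+    >>> from transformers.generation.logits_process import AlternatingCodebooksLogitsProcessor
+
+    >>> processor = AlternatingCodebooksLogitsProcessor(input_start_len=1, semantic_vocab_size=4, codebook_size=2)
+    >>> # first generated position -> only the first codebook (ids 4-5) stays available
+    >>> print(torch.isinf(processor(torch.zeros(1, 1, dtype=torch.long), torch.zeros(1, 8))[0]).tolist())
+    [True, True, True, True, False, False, True, True]
+    >>> # next position -> only the second codebook (ids 6-7) stays available
+    >>> print(torch.isinf(processor(torch.zeros(1, 2, dtype=torch.long), torch.zeros(1, 8))[0]).tolist())
+    [True, True, True, True, True, True, False, False]
+    ```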
+ """ + + def __init__(self, input_start_len: int, semantic_vocab_size: int, codebook_size: int): + if not isinstance(input_start_len, int) or input_start_len < 0: + raise ValueError(f"`input_starting_length` has to be a non-negative integer, but is {input_start_len}") + + self.input_start_len = input_start_len + self.semantic_vocab_size = semantic_vocab_size + self.codebook_size = codebook_size + + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: + curr_len = input_ids.shape[-1] + + # even -> first codebook, odd -> second codebook + is_first_codebook = ((curr_len - self.input_start_len) % 2) == 0 + + scores_processed = scores.clone() + if is_first_codebook: + scores_processed[:, : self.semantic_vocab_size] = -float("inf") + scores_processed[:, self.semantic_vocab_size + self.codebook_size :] = -float("inf") + else: + scores_processed[:, : self.semantic_vocab_size + self.codebook_size] = -float("inf") + + return scores_processed + + +class UnbatchedClassifierFreeGuidanceLogitsProcessor(LogitsProcessor): + r""" + Logits processor for Classifier-Free Guidance (CFG). The processors computes a weighted average across scores + from prompt conditional and prompt unconditional (or negative) logits, parameterized by the `guidance_scale`. + The unconditional scores are computed internally by prompting `model` with the `unconditional_ids` branch. + + See [the paper](https://arxiv.org/abs/2306.17806) for more information. + + Args: + guidance_scale (`float`): + The guidance scale for classifier free guidance (CFG). CFG is enabled by setting `guidance_scale != 1`. + Higher guidance scale encourages the model to generate samples that are more closely linked to the input + prompt, usually at the expense of poorer quality. A value smaller than 1 has the opposite effect, while + making the negative prompt provided with negative_prompt_ids (if any) act as a positive prompt. + model (`PreTrainedModel`): + The model computing the unconditional scores. Supposedly the same as the one computing the conditional + scores. Both models must use the same tokenizer. + unconditional_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of input sequence tokens in the vocabulary for the unconditional branch. If unset, will default to + the last token of the prompt. + unconditional_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Attention mask for unconditional_ids. + use_cache (`bool`, *optional*, defaults to `True`): + Whether to cache key/values during the negative prompt forward pass. 
+ + + Examples: + + ```python + >>> from transformers import AutoTokenizer, AutoModelForCausalLM + + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> inputs = tokenizer(["Today, a dragon flew over Paris, France,"], return_tensors="pt") + >>> out = model.generate(inputs["input_ids"], guidance_scale=1.5) + >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] + 'Today, a dragon flew over Paris, France, killing at least 50 people and injuring more than 100' + + >>> # with a negative prompt + >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt") + >>> out = model.generate(inputs["input_ids"], guidance_scale=2, negative_prompt_ids=neg_inputs["input_ids"]) + >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] + 'Today, a dragon flew over Paris, France, killing at least 130 people. French media reported that' + + >>> # with a positive prompt + >>> neg_inputs = tokenizer(["A very happy event happened,"], return_tensors="pt") + >>> out = model.generate(inputs["input_ids"], guidance_scale=0, negative_prompt_ids=neg_inputs["input_ids"]) + >>> tokenizer.batch_decode(out, skip_special_tokens=True)[0] + "Today, a dragon flew over Paris, France, and I'm very happy to be here. I" + ``` + """ + + def __init__( + self, + guidance_scale: float, + model, + unconditional_ids: Optional[torch.LongTensor] = None, + unconditional_attention_mask: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = True, + ): + self.guidance_scale = guidance_scale + self.model = model + self.unconditional_context = { + "input_ids": unconditional_ids, + "attention_mask": unconditional_attention_mask, + "use_cache": use_cache, + "past_key_values": None, + "first_pass": True, + } + + def get_unconditional_logits(self, input_ids): + if self.unconditional_context["first_pass"]: + if self.unconditional_context["input_ids"] is None: + self.unconditional_context["input_ids"] = input_ids[:, -1:] + if self.unconditional_context["attention_mask"] is None: + self.unconditional_context["attention_mask"] = torch.ones_like( + self.unconditional_context["input_ids"], dtype=torch.long + ) + input_ids = self.unconditional_context["input_ids"] + attention_mask = self.unconditional_context["attention_mask"] + self.unconditional_context["first_pass"] = False + else: + attention_mask = torch.cat( + [ + self.unconditional_context["attention_mask"], + torch.ones_like(input_ids[:, -1:], dtype=torch.long), + ], + dim=1, + ) + if not self.unconditional_context["use_cache"]: + input_ids = torch.cat([self.unconditional_context["input_ids"], input_ids[:, -1:]], dim=1) + else: + input_ids = input_ids[:, -1:] + self.unconditional_context["input_ids"] = input_ids + self.unconditional_context["attention_mask"] = attention_mask + + out = self.model( + input_ids, + attention_mask=attention_mask, + use_cache=self.unconditional_context["use_cache"], + past_key_values=self.unconditional_context["past_key_values"], + ) + self.unconditional_context["past_key_values"] = out.get("past_key_values", None) + + return out.logits + + def __call__(self, input_ids, scores): + scores = torch.nn.functional.log_softmax(scores, dim=-1) + if self.guidance_scale == 1: + return scores + + logits = self.get_unconditional_logits(input_ids) + + unconditional_logits = torch.nn.functional.log_softmax(logits[:, -1], dim=-1) + scores_processed = self.guidance_scale * (scores - unconditional_logits) + unconditional_logits + return 
scores_processed
+
+
+class BarkEosPrioritizerLogitsProcessor(LogitsProcessor):
+    r"""This processor ensures that the EOS token is selected if its probability is greater than `min_eos_p`.
+
+
+
+    This logits processor is exclusively compatible with
+    [Bark](https://huggingface.co/docs/transformers/en/model_doc/bark). See the model documentation for examples.
+
+
+
+    Args:
+        eos_token_id (`Union[int, List[int]]`):
+            The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.
+        min_eos_p (`float`, *optional*):
+            Minimum end of speech threshold.
+    """
+
+    def __init__(self, eos_token_id: Union[int, List[int]], min_eos_p: float):
+        if isinstance(eos_token_id, int):
+            eos_token_id = [eos_token_id]
+        self.eos_token_id = eos_token_id
+        if min_eos_p is not None and min_eos_p <= 0:
+            raise ValueError(f"`min_eos_p` has to be a positive float, but is {min_eos_p}")
+        self.min_eos_p = min_eos_p
+
+    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
+        scores_processed = scores
+        if self.min_eos_p:
+            probs = torch.nn.functional.softmax(scores.float(), dim=-1)
+            # create scores full of -inf except for the eos_token_id
+            early_stop_scores = torch.ones_like(scores) * -float("inf")
+            early_stop_scores[:, self.eos_token_id] = scores[:, self.eos_token_id]
+
+            do_early_stop = probs[:, self.eos_token_id] > self.min_eos_p
+            do_early_stop = torch.any(do_early_stop, dim=1, keepdim=True)
+            scores_processed = torch.where(do_early_stop, early_stop_scores, scores)
+
+        return scores_processed
diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py
new file mode 100644
index 0000000000000000000000000000000000000000..8374b7f7ce9284f1e7cde44a17d72ea8973b3446
--- /dev/null
+++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/stopping_criteria.py
@@ -0,0 +1,189 @@
+import time
+import warnings
+from abc import ABC
+from copy import deepcopy
+from typing import List, Optional, Union
+
+import torch
+
+from ..utils import add_start_docstrings, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
+    Args:
+        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+            Indices of input sequence tokens in the vocabulary.
+
+            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+            [`PreTrainedTokenizer.__call__`] for details.
+
+            [What are input IDs?](../glossary#input-ids)
+        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
+            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
+            or scores for each vocabulary token after SoftMax. If this stopping criteria depends on the `scores` input,
+            make sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`.
+        kwargs (`Dict[str, Any]`, *optional*):
+            Additional stopping criteria specific kwargs.
+
+    Return:
+        `torch.BoolTensor` of shape `(batch_size, 1)`, where `True` indicates we stop generation for a particular row,
+        and `False` indicates we should continue.
+
+"""
+
+
+class StoppingCriteria(ABC):
+    """Abstract base class for all stopping criteria that can be applied during generation.
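+
+    A minimal sketch of a custom criterion (the class name below is made up for illustration): subclasses implement
+    `__call__` and return a boolean tensor with one entry per row of `input_ids`, where `True` means "stop this row".
+    Such a criterion can then be passed to `generate` inside a `StoppingCriteriaList`.
+
+    ```python
+    class StopOnTokenCriteria(StoppingCriteria):
+        # Toy criterion: stop a row as soon as its last generated token equals `stop_token_id`.
+        def __init__(self, stop_token_id: int):
+            self.stop_token_id = stop_token_id
+
+        def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
+            return input_ids[:, -1] == self.stop_token_id
+    ```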
+ + If your stopping criteria depends on the `scores` input, make sure you pass `return_dict_in_generate=True, + output_scores=True` to `generate`. + """ + + @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: + raise NotImplementedError("StoppingCriteria needs to be subclassed") + + +class MaxLengthCriteria(StoppingCriteria): + """ + This class can be used to stop generation whenever the full generated number of tokens exceeds `max_length`. Keep + in mind for decoder-only type of transformers, this will include the initial prompted tokens. + + Args: + max_length (`int`): + The maximum length that the output sequence can have in number of tokens. + max_position_embeddings (`int`, *optional*): + The maximum model length, as defined by the model's `config.max_position_embeddings` attribute. + """ + + def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None): + self.max_length = max_length + self.max_position_embeddings = max_position_embeddings + + @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: + cur_len = input_ids.shape[-1] + is_done = cur_len >= self.max_length + if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: + logger.warning_once( + "This is a friendly reminder - the current text generation call will exceed the model's predefined " + f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe " + "exceptions, performance degradation, or nothing at all." + ) + return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool) + + +class MaxNewTokensCriteria(StoppingCriteria): + """ + This class can be used to stop generation whenever the generated number of tokens exceeds `max_new_tokens`. Keep in + mind for decoder-only type of transformers, this will **not** include the initial prompted tokens. This is very + close to `MaxLengthCriteria` but ignores the number of initial tokens. + + Args: + start_length (`int`): + The number of initial tokens. + max_new_tokens (`int`): + The maximum number of tokens to generate. + """ + + def __init__(self, start_length: int, max_new_tokens: int): + warnings.warn( + "The class `MaxNewTokensCriteria` is deprecated. " + f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` " + "with `max_length = start_length + max_new_tokens` instead.", + FutureWarning, + ) + self.start_length = start_length + self.max_new_tokens = max_new_tokens + self.max_length = start_length + max_new_tokens + + @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: + is_done = input_ids.shape[-1] >= self.max_length + return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool) + + +class MaxTimeCriteria(StoppingCriteria): + """ + This class can be used to stop generation whenever the full generation exceeds some amount of time. By default, the + time will start being counted when you initialize this function. You can override this by passing an + `initial_time`. + + Args: + max_time (`float`): + The maximum allowed time in seconds for the generation. 
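The built-in criteria above all return one boolean per batch row from `__call__`. A minimal sketch of a custom criterion following the same contract; `StopOnTokenCriteria` is a hypothetical name, not part of the library:

```python
import torch
from transformers import StoppingCriteria

class StopOnTokenCriteria(StoppingCriteria):  # hypothetical example, not part of the library
    def __init__(self, stop_token_id: int):
        self.stop_token_id = stop_token_id

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor:
        # True for every row whose most recently generated token is the stop token.
        return input_ids[:, -1] == self.stop_token_id
```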
+ initial_time (`float`, *optional*, defaults to `time.time()`): + The start of the generation allowed time. + """ + + def __init__(self, max_time: float, initial_timestamp: Optional[float] = None): + self.max_time = max_time + self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp + + @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: + is_done = time.time() - self.initial_timestamp > self.max_time + return torch.full((input_ids.shape[0],), is_done, device=input_ids.device, dtype=torch.bool) + + +class EosTokenCriteria(StoppingCriteria): + """ + This class can be used to stop generation whenever the "end-of-sequence" token is generated. + By default, it uses the `model.generation_config.eos_token_id`. + + Args: + eos_token_id (`Union[int, List[int]]`): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + """ + + def __init__(self, eos_token_id: Union[int, List[int]]): + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + self.eos_token_id = torch.tensor(eos_token_id) + + @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: + if input_ids.device.type == "mps": + # https://github.com/pytorch/pytorch/issues/77764#issuecomment-2067838075 + is_done = ( + input_ids[:, -1] + .tile(self.eos_token_id.shape[0], 1) + .eq(self.eos_token_id.unsqueeze(1).to(input_ids.device)) + .sum(dim=0) + .bool() + .squeeze() + ) + else: + is_done = torch.isin(input_ids[:, -1], self.eos_token_id.to(input_ids.device)) + return is_done + + +class StoppingCriteriaList(list): + @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING) + def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.BoolTensor: + is_done = torch.full((input_ids.shape[0],), False, device=input_ids.device) + for criteria in self: + is_done = is_done | criteria(input_ids, scores, **kwargs) + return is_done + + @property + def max_length(self) -> Optional[int]: + for stopping_criterium in self: + if isinstance(stopping_criterium, MaxLengthCriteria): + return stopping_criterium.max_length + elif isinstance(stopping_criterium, MaxNewTokensCriteria): + return stopping_criterium.max_length + return None + + +def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList: + stopping_max_length = stopping_criteria.max_length + new_stopping_criteria = deepcopy(stopping_criteria) + if stopping_max_length is not None and stopping_max_length != max_length: + warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning) + elif stopping_max_length is None: + new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length)) + return new_stopping_criteria diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/streamers.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/streamers.py new file mode 100644 index 0000000000000000000000000000000000000000..c75b43466af7a8423d401639cce85411d4edcab5 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/streamers.py @@ -0,0 +1,227 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. 
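`StoppingCriteriaList` ORs the results of its members, so a row stops as soon as any single criterion fires. A usage sketch, reusing the GPT-2 checkpoint that appears in the docstring examples:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, MaxTimeCriteria, StoppingCriteriaList

tok = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tok(["An increasing sequence: one,"], return_tensors="pt")

# Stop at 30 total tokens (handled by `max_length`) or after 2 seconds of wall-clock
# time (added via the custom criteria list), whichever comes first.
criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
out = model.generate(**inputs, max_length=30, stopping_criteria=criteria)
print(tok.batch_decode(out, skip_special_tokens=True)[0])
```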
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from queue import Queue +from typing import TYPE_CHECKING, Optional + + +if TYPE_CHECKING: + from ..models.auto import AutoTokenizer + + +class BaseStreamer: + """ + Base class from which `.generate()` streamers should inherit. + """ + + def put(self, value): + """Function that is called by `.generate()` to push new tokens""" + raise NotImplementedError() + + def end(self): + """Function that is called by `.generate()` to signal the end of generation""" + raise NotImplementedError() + + +class TextStreamer(BaseStreamer): + """ + Simple text streamer that prints the token(s) to stdout as soon as entire words are formed. + + + + The API for the streamer classes is still under development and may change in the future. + + + + Parameters: + tokenizer (`AutoTokenizer`): + The tokenized used to decode the tokens. + skip_prompt (`bool`, *optional*, defaults to `False`): + Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. + decode_kwargs (`dict`, *optional*): + Additional keyword arguments to pass to the tokenizer's `decode` method. + + Examples: + + ```python + >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer + + >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") + >>> streamer = TextStreamer(tok) + + >>> # Despite returning the usual output, the streamer will also print the generated text to stdout. + >>> _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20) + An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven, + ``` + """ + + def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs): + self.tokenizer = tokenizer + self.skip_prompt = skip_prompt + self.decode_kwargs = decode_kwargs + + # variables used in the streaming process + self.token_cache = [] + self.print_len = 0 + self.next_tokens_are_prompt = True + + def put(self, value): + """ + Receives tokens, decodes them, and prints them to stdout as soon as they form entire words. + """ + if len(value.shape) > 1 and value.shape[0] > 1: + raise ValueError("TextStreamer only supports batch size 1") + elif len(value.shape) > 1: + value = value[0] + + if self.skip_prompt and self.next_tokens_are_prompt: + self.next_tokens_are_prompt = False + return + + # Add the new token to the cache and decodes the entire thing. + self.token_cache.extend(value.tolist()) + text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) + + # After the symbol for a new line, we flush the cache. + if text.endswith("\n"): + printable_text = text[self.print_len :] + self.token_cache = [] + self.print_len = 0 + # If the last token is a CJK character, we print the characters. 
+ elif len(text) > 0 and self._is_chinese_char(ord(text[-1])): + printable_text = text[self.print_len :] + self.print_len += len(printable_text) + # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, + # which may change with the subsequent token -- there are probably smarter ways to do this!) + else: + printable_text = text[self.print_len : text.rfind(" ") + 1] + self.print_len += len(printable_text) + + self.on_finalized_text(printable_text) + + def end(self): + """Flushes any remaining cache and prints a newline to stdout.""" + # Flush the cache, if it exists + if len(self.token_cache) > 0: + text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs) + printable_text = text[self.print_len :] + self.token_cache = [] + self.print_len = 0 + else: + printable_text = "" + + self.next_tokens_are_prompt = True + self.on_finalized_text(printable_text, stream_end=True) + + def on_finalized_text(self, text: str, stream_end: bool = False): + """Prints the new text to stdout. If the stream is ending, also prints a newline.""" + print(text, flush=True, end="" if not stream_end else None) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + +class TextIteratorStreamer(TextStreamer): + """ + Streamer that stores print-ready text in a queue, to be used by a downstream application as an iterator. This is + useful for applications that benefit from acessing the generated text in a non-blocking way (e.g. in an interactive + Gradio demo). + + + + The API for the streamer classes is still under development and may change in the future. + + + + Parameters: + tokenizer (`AutoTokenizer`): + The tokenized used to decode the tokens. + skip_prompt (`bool`, *optional*, defaults to `False`): + Whether to skip the prompt to `.generate()` or not. Useful e.g. for chatbots. + timeout (`float`, *optional*): + The timeout for the text queue. If `None`, the queue will block indefinitely. Useful to handle exceptions + in `.generate()`, when it is called in a separate thread. + decode_kwargs (`dict`, *optional*): + Additional keyword arguments to pass to the tokenizer's `decode` method. 
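Since `TextStreamer` routes every chunk of print-ready text through `on_finalized_text`, redirecting output away from stdout only requires overriding that one method. A minimal sketch; `ListStreamer` is a hypothetical subclass, not part of the library:

```python
from transformers import TextStreamer

class ListStreamer(TextStreamer):  # hypothetical example, not part of the library
    def __init__(self, tokenizer, **decode_kwargs):
        super().__init__(tokenizer, **decode_kwargs)
        self.chunks = []

    def on_finalized_text(self, text: str, stream_end: bool = False):
        # Collect the finalized text instead of printing it.
        if text:
            self.chunks.append(text)

# Passing an instance as `streamer=` to `model.generate(...)` would then fill
# `.chunks` while generation runs, instead of writing to stdout.
```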
+ + Examples: + + ```python + >>> from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer + >>> from threading import Thread + + >>> tok = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> inputs = tok(["An increasing sequence: one,"], return_tensors="pt") + >>> streamer = TextIteratorStreamer(tok) + + >>> # Run the generation in a separate thread, so that we can fetch the generated text in a non-blocking way. + >>> generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=20) + >>> thread = Thread(target=model.generate, kwargs=generation_kwargs) + >>> thread.start() + >>> generated_text = "" + >>> for new_text in streamer: + ... generated_text += new_text + >>> generated_text + 'An increasing sequence: one, two, three, four, five, six, seven, eight, nine, ten, eleven,' + ``` + """ + + def __init__( + self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs + ): + super().__init__(tokenizer, skip_prompt, **decode_kwargs) + self.text_queue = Queue() + self.stop_signal = None + self.timeout = timeout + + def on_finalized_text(self, text: str, stream_end: bool = False): + """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue.""" + self.text_queue.put(text, timeout=self.timeout) + if stream_end: + self.text_queue.put(self.stop_signal, timeout=self.timeout) + + def __iter__(self): + return self + + def __next__(self): + value = self.text_queue.get(timeout=self.timeout) + if value == self.stop_signal: + raise StopIteration() + else: + return value diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py new file mode 100644 index 0000000000000000000000000000000000000000..fc9799b7ab39f19610faf3ac684e3cb287c95678 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_logits_process.py @@ -0,0 +1,591 @@ +# coding=utf-8 +# Copyright 2022 The HuggingFace Inc. team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import inspect +from typing import List, Tuple + +import numpy as np +import tensorflow as tf + +from ..tf_utils import stable_softmax +from ..utils import add_start_docstrings +from ..utils.logging import get_logger + + +logger = get_logger(__name__) + + +TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" + Args: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + scores (`tf.Tensor` of shape `(batch_size, config.vocab_size)`): + Prediction scores of a language modeling head. 
These can be logits for each vocabulary when not using beam + search or log softmax for each vocabulary token when using beam search. + cur_len (`int`): + The current length of valid input sequence tokens. In the TF implementation, the input_ids' sequence length + is the maximum length generate can produce, and we need to know which of its tokens are valid. + kwargs (`Dict[str, Any]`, *optional*): + Additional logits processor specific kwargs. + + Return: + `tf.Tensor` of shape `(batch_size, config.vocab_size)`: The processed prediction scores. +""" + + +class TFLogitsProcessor: + """Abstract base class for all logit processors that can be applied during generation.""" + + @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + """TF method for processing logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class TFLogitsWarper: + """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" + + @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + """TF method for warping logits.""" + raise NotImplementedError( + f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." + ) + + +class TFLogitsProcessorList(list): + """ + This class can be used to create a list of [`TFLogitsProcessor`] to subsequently process a `scores` input tensor. + This class inherits from list and adds a specific *__call__* method to apply each [`TFLogitsProcessor`] to the + inputs. + """ + + @add_start_docstrings(TF_LOGITS_PROCESSOR_INPUTS_DOCSTRING) + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int, **kwargs) -> tf.Tensor: + for processor in self: + function_args = inspect.signature(processor.__call__).parameters + if len(function_args) > 3: + if not all(arg in kwargs for arg in list(function_args.keys())[2:]): + raise ValueError( + f"Make sure that all the required parameters: {list(function_args.keys())} for " + f"{processor.__class__} are passed to the logits processor." + ) + scores = processor(input_ids, scores, cur_len, **kwargs) + else: + scores = processor(input_ids, scores, cur_len) + return scores + + +class TFTemperatureLogitsWarper(TFLogitsWarper): + r""" + [`TFLogitsWarper`] for temperature (exponential scaling output probability distribution). + + Args: + temperature (`float`): + The value used to module the logits distribution. + """ + + def __init__(self, temperature: float): + if not isinstance(temperature, float) or not (temperature > 0): + raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") + + self.temperature = temperature + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + scores = scores / self.temperature + return scores + + +class TFTopKLogitsWarper(TFLogitsWarper): + r""" + [`TFLogitsWarper`] that performs top-k, i.e. restricting to the k highest probability elements. + + Args: + top_k (`int`): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. 
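The TF warpers can be exercised on their own in eager mode, which makes their effect on a row of logits easy to inspect. A sketch with made-up logits, chaining temperature scaling and top-k through `TFLogitsProcessorList` (note the extra `cur_len` argument in the TF call signature):

```python
import tensorflow as tf
from transformers.generation.tf_logits_process import (
    TFLogitsProcessorList,
    TFTemperatureLogitsWarper,
    TFTopKLogitsWarper,
)

input_ids = tf.constant([[0, 1, 2]])                # dummy prompt, batch size 1
scores = tf.constant([[1.0, 4.0, 2.0, 3.0, 0.5]])   # made-up logits over a 5-token vocabulary

warpers = TFLogitsProcessorList([
    TFTemperatureLogitsWarper(temperature=0.7),
    TFTopKLogitsWarper(top_k=2),
])
# Everything outside the two highest-scoring tokens is pushed to -inf.
print(warpers(input_ids, scores, cur_len=3))
```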
+ """ + + def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + if not isinstance(top_k, int) or top_k <= 0: + raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") + + self.top_k = max(top_k, min_tokens_to_keep) + self.filter_value = filter_value + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + top_k = min(self.top_k, scores.shape[-1]) # Safety check + # Boolean mask containing all tokens with a probability less than the last token of the top-k + indices_to_remove = scores < tf.math.top_k(scores, k=top_k)[0][..., -1:] + next_scores = tf.where(indices_to_remove, self.filter_value, scores) + return next_scores + + +class TFTopPLogitsWarper(TFLogitsWarper): + """ + [`TFLogitsWarper`] that performs top-p, i.e. restricting to top tokens summing to <= prob_cut_off. + + Args: + top_p (`float`): + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. + filter_value (`float`, *optional*, defaults to -inf): + All filtered values will be set to this float value. + min_tokens_to_keep (`int`, *optional*, defaults to 1): + Minimum number of tokens that cannot be filtered. + """ + + def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): + if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): + raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") + if not isinstance(min_tokens_to_keep, int) or (min_tokens_to_keep < 1): + raise ValueError(f"`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}") + + self.top_p = top_p + self.filter_value = filter_value + self.min_tokens_to_keep = min_tokens_to_keep + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + topk_scores, topk_indices = tf.math.top_k(scores, scores.shape[-1]) + + mask_scores = tf.fill(scores.shape, self.filter_value) + cumulative_probs = tf.math.cumsum(stable_softmax(topk_scores, axis=-1), axis=-1) + score_mask = cumulative_probs < self.top_p + + # Also include the token that is higher than top_p (the first false = shift and insert a True on the left) + score_mask = tf.concat((tf.ones([score_mask.shape[0], 1], dtype=tf.bool), score_mask[:, :-1]), axis=-1) + + # Ensure min tokens to keep + score_mask = tf.concat( + ( + tf.ones([score_mask.shape[0], self.min_tokens_to_keep], dtype=tf.bool), + score_mask[:, self.min_tokens_to_keep :], + ), + axis=-1, + ) + + # Mask the values that do not fit the criteria + topk_next_scores = tf.where(score_mask, topk_scores, mask_scores) + + # Undo the topk sorting: converts the 2D matrix of per-row original indices of shape (batch_size, vocab_size) + # to a 3D tensor of shape (batch_size, vocab_size, 2) containing the original score coordinate, from which we + # can scatter (i.e. `scatter_indices[row, col, :]` is a tensor containing `[row, topk_indices[row, col]]`) + scatter_rows = tf.tile(tf.expand_dims(tf.range(topk_indices.shape[0]), axis=-1), [1, topk_indices.shape[-1]]) + scatter_indices = tf.stack((scatter_rows, topk_indices), axis=-1) + next_scores = tf.scatter_nd(scatter_indices, topk_next_scores, shape=topk_next_scores.shape) + + return next_scores + + +class TFMinLengthLogitsProcessor(TFLogitsProcessor): + r""" + [`TFLogitsProcessor`] enforcing a min-length by setting EOS probability to 0. 
+ + Args: + min_length (`int`): + The minimum length below which the score of `eos_token_id` is set to `-float("Inf")`. + eos_token_id (`int`): + The id of the *end-of-sequence* token. + """ + + def __init__(self, min_length: int, eos_token_id: int): + if not isinstance(min_length, int) or min_length < 0: + raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") + + if not isinstance(eos_token_id, int) or eos_token_id < 0: + raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") + + self.min_length = min_length + self.eos_token_id = eos_token_id + + def _apply_eos_token_mask(self, scores: tf.Tensor) -> tf.Tensor: + eos_token_id_mask = tf.range(scores.shape[-1]) == self.eos_token_id + scores = tf.where(eos_token_id_mask, float("-inf"), scores) + return scores + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + # applies eos token masking if the first argument is true + scores = tf.cond( + tf.less(cur_len, self.min_length), + lambda: self._apply_eos_token_mask(scores), + lambda: tf.identity(scores), + ) + return scores + + +class TFRepetitionPenaltyLogitsProcessor(TFLogitsProcessor): + r""" + [`TFLogitsProcessor`] enforcing an exponential penalty on repeated sequences. + + Args: + repetition_penalty (`float`): + The parameter for repetition penalty. 1.0 means no penalty. See [this + paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. + """ + + def __init__(self, penalty: float): + if not isinstance(penalty, float) or not (penalty > 0): + raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") + + self.penalty = penalty + + def _create_score_penalties(self, input_ids: tf.Tensor, logits: tf.Tensor) -> tf.Tensor: + # We want to populate the penalties in the positions of `input_ids`. Since XLA can't handle shapes unknown + # before runtime, `tf.unique` can't be used. Therefore, we may have redundant updates, when a given row has + # the same token multiple times. + + # Gathers the penalties to apply + logit_penalties = tf.gather(logits, input_ids, axis=1, batch_dims=1) + logit_penalties = tf.where(logit_penalties > 0, 1 / self.penalty, logit_penalties) + logit_penalties = tf.where(logit_penalties < 0, self.penalty, logit_penalties) + + # Scatters the penalties + token_penalties = tf.ones(logits.shape) + batch_size = input_ids.shape[0] + seq_len = tf.shape(input_ids)[1] # the sequence length has dynamic size, hence the dynamic shape + indexable_prev_input_ids = tf.concat( + ( + tf.expand_dims(tf.repeat(tf.range(batch_size), seq_len), axis=-1), + tf.expand_dims(tf.reshape(input_ids, [-1]), axis=-1), + ), + axis=1, + ) + token_penalties = tf.tensor_scatter_nd_update( + token_penalties, indices=indexable_prev_input_ids, updates=tf.reshape(logit_penalties, [-1]) + ) + return token_penalties + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + score_penalties = self._create_score_penalties(input_ids[:, :cur_len], scores) + + scores = tf.math.multiply(scores, score_penalties) + + return scores + + +class TFNoBadWordsLogitsProcessor(TFLogitsProcessor): + """ + [`TFLogitsProcessor`] that enforces that specified sequences will never be sampled. + + Args: + bad_words_ids (`List[List[int]]`): + List of list of token ids that are not allowed to be generated. 
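The repetition penalty above follows the rule from the paper linked in its docstring: for tokens already present in `input_ids`, positive logits are divided by `penalty` and negative logits are multiplied by it, so both cases make repetition less likely for `penalty > 1`. A sketch with made-up logits:

```python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFRepetitionPenaltyLogitsProcessor

input_ids = tf.constant([[0, 2]])               # tokens 0 and 2 were already generated
scores = tf.constant([[2.0, 1.0, -1.0, 3.0]])   # made-up logits over a 4-token vocabulary

processor = TFRepetitionPenaltyLogitsProcessor(penalty=2.0)
# Token 0: 2.0 -> 1.0  (positive logit divided by the penalty).
# Token 2: -1.0 -> -2.0 (negative logit multiplied by the penalty).
# Tokens 1 and 3 are untouched.
print(processor(input_ids, scores, cur_len=2))
```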
In order to get the tokens of the words + that should not appear in the generated text, make sure to set `add_prefix_space=True` when initializing + the tokenizer, and use `tokenizer(bad_words, add_special_tokens=False).input_ids`. The `add_prefix_space` + argument is only supported for some slow tokenizers, as fast tokenizers' prefixing behaviours come from + `pre tokenizers`. Read more [here](https://huggingface.co/docs/tokenizers/api/pre-tokenizers). + eos_token_id (`int`): + The id of the *end-of-sequence* token. + """ + + def __init__(self, bad_words_ids: List[List[int]], eos_token_id: int): + if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0: + raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.") + if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids): + raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.") + if any( + any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids) + for bad_word_ids in bad_words_ids + ): + raise ValueError( + f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}." + ) + + # stores the information about bad words in three tensors: + # 1. a rectangular tensor with the forbidden sequences (padded with `-1`), for full data comparisons + self.bad_word_seqs_ids = tf.ragged.constant(bad_words_ids).to_tensor(default_value=-1) + # 2. a tensor with the unpadded length of each forbidden sequence, for quick length comparisons + bad_word_seqs_len = [len(bad_words) for bad_words in bad_words_ids] + if any(word_len == 0 for word_len in bad_word_seqs_len): + raise ValueError(f"Banned words token sequences {bad_words_ids} cannot have an empty list") + self.bad_word_seqs_len = tf.convert_to_tensor(bad_word_seqs_len, dtype=tf.int32) + # 3. a tensor containing the last token for each sequence, for easy access to the tokens that may be banned + self.seq_forbidden_tokens = tf.convert_to_tensor([bad_words[-1] for bad_words in bad_words_ids]) + + def _calc_row_banned_bad_tokens(self, row_input_ids: tf.Tensor) -> tf.Tensor: + def _tokens_match(bad_word_seq_number): + def _len_one(): + # If the bad sequence only has one token, always mask it + return tf.cond( + tf.math.equal(self.bad_word_seqs_len[bad_word_seq_number], 1), + lambda: tf.ones((), dtype=tf.bool), + _len_greater_than_cur_len, + ) + + def _len_greater_than_cur_len(): + # Otherwise, if the bad sequence is longer than the current length they can't ever match + return tf.cond( + tf.math.greater(self.bad_word_seqs_len[bad_word_seq_number], tf.shape(row_input_ids)[0]), + lambda: tf.zeros((), dtype=tf.bool), + _match_found, + ) + + def _match_found(): + # Finaly, runs the actual comparison. Can only be called if the previous comparisons do not yield + # an answer (otherwise we get indexing exceptions) + compare_len = self.bad_word_seqs_len[bad_word_seq_number] - 1 + return tf.cond( + tf.math.reduce_all( + tf.math.equal( + row_input_ids[-compare_len:], self.bad_word_seqs_ids[bad_word_seq_number, :compare_len] + ) + ), + lambda: tf.ones((), dtype=tf.bool), + lambda: tf.zeros((), dtype=tf.bool), + ) + + match = _len_one() + return match + + # Compares the current row against all bad word sequences, obtaining a mask with the matches. 
+ match_mask = tf.map_fn(_tokens_match, tf.range(self.bad_word_seqs_ids.shape[0]), fn_output_signature=tf.bool) + row_banned_tokens = self.seq_forbidden_tokens[match_mask] + return row_banned_tokens + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + # We want to mask some banned tokens, at a score level. Since the banned tokens depend on the previous + # `input_ids`, they may have a different length for each row, and they may even be empty for some rows. + # To remain simple and XLA-compatible, we work on a per-row fashion. + # TODO (Joao): this function might trigger XLA retracing as `cur_len` increases. Fix it if it becomes + # a frequent choke point. (make `cur_len` a tensor?) + def _get_row_updated_score(row_inputs: Tuple[tf.Tensor]) -> tf.Tensor: + row_input_ids, row_score = row_inputs + banned_tokens = self._calc_row_banned_bad_tokens(row_input_ids[:cur_len]) + banned_tokens_mask = tf.scatter_nd( + indices=tf.expand_dims(banned_tokens, axis=-1), + updates=tf.ones_like(banned_tokens, dtype=tf.bool), + shape=row_score.shape, + ) + row_score = tf.where(banned_tokens_mask, -float("inf"), row_score) + return row_score + + scores = tf.map_fn(_get_row_updated_score, (input_ids, scores), fn_output_signature=tf.float32) + return scores + + +class TFNoRepeatNGramLogitsProcessor(TFLogitsProcessor): + r""" + [`TFLogitsProcessor`] that enforces no repetition of n-grams. See + [Fairseq](https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345). + + Args: + ngram_size (`int`): + All ngrams of size `ngram_size` can only occur once. + """ + + def __init__(self, ngram_size: int): + if not isinstance(ngram_size, int) or ngram_size <= 0: + raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") + self.ngram_size = ngram_size + + def calc_banned_ngram_tokens(self, input_ids, num_hypos, cur_len): + # Copied from fairseq for no_repeat_ngram in beam_search + if cur_len + 1 < self.ngram_size: + # return no banned tokens if we haven't generated ngram_size tokens yet + return [[] for _ in range(num_hypos)] + generated_ngrams = [{} for _ in range(num_hypos)] + prev_input_ids = input_ids[:, :cur_len] + for idx in range(num_hypos): + gen_tokens = prev_input_ids[idx].numpy().tolist() + generated_ngram = generated_ngrams[idx] + for ngram in zip(*[gen_tokens[i:] for i in range(self.ngram_size)]): + prev_ngram_tuple = tuple(ngram[:-1]) + generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] + + def _get_generated_ngrams(hypo_idx): + # Before decoding the next token, prevent decoding of ngrams that have already appeared + start_idx = cur_len + 1 - self.ngram_size + ngram_idx = tuple(prev_input_ids[hypo_idx, start_idx:cur_len].numpy().tolist()) + return generated_ngrams[hypo_idx].get(ngram_idx, []) + + banned_tokens = [_get_generated_ngrams(hypo_idx) for hypo_idx in range(num_hypos)] + + return banned_tokens + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + # TODO (joao): enable XLA on this logits processor. 
See discussion and attempts in + # https://github.com/huggingface/transformers/pull/16974 + if not tf.executing_eagerly(): + raise NotImplementedError("TFNoRepeatNGramLogitsProcessor is only implemented for eager execution.") + + batch_size, vocab_size = scores.shape + banned_tokens = self.calc_banned_ngram_tokens(input_ids, batch_size, cur_len) + + # create banned_tokens boolean mask + banned_tokens_indices_mask = [] + for banned_tokens_slice in banned_tokens: + banned_tokens_indices_mask.append( + [True if token in banned_tokens_slice else False for token in range(vocab_size)] + ) + + scores = tf.where(tf.convert_to_tensor(banned_tokens_indices_mask, dtype=tf.bool), -float("inf"), scores) + + return scores + + +class TFForcedBOSTokenLogitsProcessor(TFLogitsProcessor): + r""" + [`TFLogitsProcessor`] that enforces the specified token as the first generated token. + + Args: + bos_token_id (`int`): + The id of the token to force as the first generated token. + """ + + def __init__(self, bos_token_id: int): + if bos_token_id < 0: + raise ValueError(f"The forced bos token id must be a non-negative integer, got {bos_token_id}") + self.bos_token_id = bos_token_id + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + if cur_len == 1: + batch_size, num_tokens = scores.shape + # sets the score to 0 in the bos_token_id column + scores = tf.zeros((batch_size, 1)) + # sets the score to -inf everywhere else + if self.bos_token_id > 0: + scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.bos_token_id)), scores), axis=-1) + if self.bos_token_id < (num_tokens - 1): + scores = tf.concat( + (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.bos_token_id))), + axis=-1, + ) + return scores + + +class TFForcedEOSTokenLogitsProcessor(TFLogitsProcessor): + r""" + [`TFLogitsProcessor`] that enforces the specified token as the last generated token when `max_length` is reached. + + Args: + max_length (`int`): + The maximum length of the sequence to be generated. + eos_token_id (`int`): + The id of the token to force as the last generated token when `max_length` is reached. + """ + + def __init__(self, max_length: int, eos_token_id: int): + self.max_length = max_length + if eos_token_id < 0: + raise ValueError(f"The forced eos token id must be a non-negative integer, got {eos_token_id}") + self.eos_token_id = eos_token_id + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + if cur_len == self.max_length - 1: + batch_size, num_tokens = scores.shape + # sets the score to 0 in the eos_token_id column + scores = tf.zeros((batch_size, 1)) + # sets the score to -inf everywhere else + if self.eos_token_id > 0: + scores = tf.concat((tf.broadcast_to(-float("inf"), (batch_size, self.eos_token_id)), scores), axis=-1) + if self.eos_token_id < (num_tokens - 1): + scores = tf.concat( + (scores, tf.broadcast_to(-float("inf"), (batch_size, (num_tokens - 1) - self.eos_token_id))), + axis=-1, + ) + return scores + + +class TFSuppressTokensAtBeginLogitsProcessor(TFLogitsProcessor): + r""" + [`TFSuppressTokensAtBeginLogitsProcessor`] suppresses a list of tokens as soon as the `generate` function starts + generating using `begin_index` tokens. This should ensure that the tokens defined by `begin_suppress_tokens` at not + sampled at the begining of the generation. 
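`TFForcedBOSTokenLogitsProcessor` (defined above) leaves only the forced token reachable at the first generation step: its column is set to 0 and every other column to `-inf`. A sketch with made-up logits:

```python
import tensorflow as tf
from transformers.generation.tf_logits_process import TFForcedBOSTokenLogitsProcessor

scores = tf.constant([[0.3, 1.2, -0.7, 0.1]])   # made-up logits over a 4-token vocabulary
input_ids = tf.constant([[5]])                  # only the decoder start token so far

processor = TFForcedBOSTokenLogitsProcessor(bos_token_id=2)
# At cur_len == 1, every column except bos_token_id becomes -inf and column 2 becomes 0,
# so the first generated token is guaranteed to be token 2.
print(processor(input_ids, scores, cur_len=1))
```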
+ """ + + def __init__(self, begin_suppress_tokens, begin_index): + self.begin_suppress_tokens = list(begin_suppress_tokens) + self.begin_index = begin_index + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + scores = tf.cond( + tf.equal(cur_len, self.begin_index), + lambda: tf.tensor_scatter_nd_update( + scores, + indices=[[i, token] for i in range(scores.shape[0]) for token in self.begin_suppress_tokens], + updates=[-float("inf") for _ in range(scores.shape[0] * len(self.begin_suppress_tokens))], + ), + lambda: scores, + ) + return scores + + +class TFSuppressTokensLogitsProcessor(TFLogitsProcessor): + r"""This processor can be used to suppress a list of tokens. The processor will set their log probs to `-inf` so that they + are not sampled.""" + + def __init__(self, suppress_tokens): + self.suppress_tokens = list(suppress_tokens) + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + scores = tf.tensor_scatter_nd_update( + scores, + indices=[[i, token] for i in range(scores.shape[0]) for token in self.suppress_tokens], + updates=[-float("inf") for _ in range(scores.shape[0] * len(self.suppress_tokens))], + ) + return scores + + +class TFForceTokensLogitsProcessor(TFLogitsProcessor): + r"""This processor takes a list of pairs of integers which indicates a mapping from generation indices to token + indices that will be forced before sampling. The processor will set their log probs to `0` and all other tokens to + `-inf` so that they are sampled at their corresponding index.""" + + def __init__(self, force_token_map: List[List[int]]): + force_token_map = dict(force_token_map) + # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the + # index of the array corresponds to the index of the token to be forced, for XLA compatibility. + # Indexes without forced tokens will have an negative value. + force_token_array = np.ones((max(force_token_map.keys()) + 1), dtype=np.int32) * -1 + for index, token in force_token_map.items(): + if token is not None: + force_token_array[index] = token + self.force_token_array = tf.convert_to_tensor(force_token_array, dtype=tf.int32) + + def __call__(self, input_ids: tf.Tensor, scores: tf.Tensor, cur_len: int) -> tf.Tensor: + def _force_token(generation_idx): + batch_size = scores.shape[0] + current_token = self.force_token_array[generation_idx] + + new_scores = tf.ones_like(scores, dtype=scores.dtype) * -float("inf") + indices = tf.stack((tf.range(batch_size), tf.tile([current_token], [batch_size])), axis=1) + updates = tf.zeros((batch_size,), dtype=scores.dtype) + new_scores = tf.tensor_scatter_nd_update(new_scores, indices, updates) + return new_scores + + scores = tf.cond( + tf.greater_equal(cur_len, tf.shape(self.force_token_array)[0]), + # If the current length is geq than the length of force_token_array, the processor does nothing. + lambda: tf.identity(scores), + # Otherwise, it may force a certain token. + lambda: tf.cond( + tf.greater_equal(self.force_token_array[cur_len], 0), + # Only valid (positive) tokens are forced + lambda: _force_token(cur_len), + # Otherwise, the processor does nothing. 
+ lambda: scores, + ), + ) + return scores diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..90219c316b6c8c0bcc763fefff8af8269a0a77ee --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/tf_utils.py @@ -0,0 +1,3131 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +import inspect +import warnings +from dataclasses import dataclass +from typing import Any, Dict, Optional, Tuple, Union + +import numpy as np +import tensorflow as tf +from tensorflow.compiler.tf2xla.python.xla import dynamic_update_slice + +from ..modeling_tf_outputs import TFCausalLMOutputWithPast, TFSeq2SeqLMOutput +from ..models.auto import ( + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, +) +from ..tf_utils import shape_list, stable_softmax +from ..utils import ModelOutput, logging +from .configuration_utils import GenerationConfig +from .tf_logits_process import ( + TFForcedBOSTokenLogitsProcessor, + TFForcedEOSTokenLogitsProcessor, + TFForceTokensLogitsProcessor, + TFLogitsProcessorList, + TFMinLengthLogitsProcessor, + TFNoBadWordsLogitsProcessor, + TFNoRepeatNGramLogitsProcessor, + TFRepetitionPenaltyLogitsProcessor, + TFSuppressTokensAtBeginLogitsProcessor, + TFSuppressTokensLogitsProcessor, + TFTemperatureLogitsWarper, + TFTopKLogitsWarper, + TFTopPLogitsWarper, +) + + +logger = logging.get_logger(__name__) + + +@dataclass +class TFGreedySearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using greedy search. + + + Args: + sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. 
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFGreedySearchEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using greedy search. Hidden states and attention + weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the + encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + + Args: + sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. 
+ """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFSampleDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using sampling. + + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(num_return_sequences*batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFSampleEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of + the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states + attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size*num_return_sequences, config.vocab_size)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size*num_return_sequences, + num_heads, sequence_length, sequence_length)`. 
+ encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size*num_return_sequences, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_return_sequences, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFBeamSearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using beam search. + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log + softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this + beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. 
+ hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFBeamSearchEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights + of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states + attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`tf.Tensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log + softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this + beam. `Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length, + sequence_length)`. 
+ cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + + @dataclass + class TFBeamSampleDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using beam sampling. + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log + softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this + beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams*num_return_sequences, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`.
+ """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + + @dataclass + class TFBeamSampleEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention + weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the + encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + Args: + sequences (`tf.Tensor` of shape `(batch_size*num_beams, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`tf.Tensor` of shape `(batch_size * num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log + softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this + beam. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size*num_beams, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`.
+ decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size*num_beams, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + sequences_scores: Optional[tf.Tensor] = None + scores: Optional[Tuple[tf.Tensor]] = None + beam_indices: Optional[tf.Tensor] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFContrastiveSearchDecoderOnlyOutput(ModelOutput): + """ + Base class for outputs of decoder-only generation models using contrastive search. + + Args: + sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +@dataclass +class TFContrastiveSearchEncoderDecoderOutput(ModelOutput): + """ + Base class for outputs of encoder-decoder generation models using contrastive search. Hidden states and attention + weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the + encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) + + Args: + sequences (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. 
Tuple of `tf.Tensor` with up to `max_new_tokens` elements (one element for each + generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `tf.Tensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, sequence_length, + sequence_length)`. + encoder_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape + `(batch_size, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(tf.Tensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `tf.Tensor` of shape `(batch_size, generated_length, hidden_size)`. + """ + + sequences: tf.Tensor = None + scores: Optional[Tuple[tf.Tensor]] = None + encoder_attentions: Optional[Tuple[tf.Tensor]] = None + encoder_hidden_states: Optional[Tuple[tf.Tensor]] = None + decoder_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + cross_attentions: Optional[Tuple[Tuple[tf.Tensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[tf.Tensor]]] = None + + +TFGreedySearchOutput = Union[TFGreedySearchEncoderDecoderOutput, TFGreedySearchDecoderOnlyOutput] +TFSampleOutput = Union[TFSampleEncoderDecoderOutput, TFSampleDecoderOnlyOutput] +TFBeamSearchOutput = Union[TFBeamSearchEncoderDecoderOutput, TFBeamSearchDecoderOnlyOutput] +TFBeamSampleOutput = Union[TFBeamSampleEncoderDecoderOutput, TFBeamSampleDecoderOnlyOutput] +TFContrastiveSearchOutput = Union[TFContrastiveSearchEncoderDecoderOutput, TFContrastiveSearchDecoderOnlyOutput] +TFGenerateOutput = Union[ + TFGreedySearchOutput, TFSampleOutput, TFBeamSearchOutput, TFBeamSampleOutput, TFContrastiveSearchOutput +] + + +class TFGenerationMixin: + """ + A class containing all of the functions supporting generation, to be used as a mixin in [`TFPreTrainedModel`]. + + The class exposes [`~generation.TFGenerationMixin.generate`], which can be used for: + - *greedy decoding* by calling [`~generation.TFGenerationMixin.greedy_search`] if `num_beams=1` and + `do_sample=False` + - *contrastive search* by calling [`~generation.TFGenerationMixin.contrastive_search`] if `penalty_alpha>0` and + `top_k>1` + - *multinomial sampling* by calling [`~generation.TFGenerationMixin.sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.TFGenerationMixin.beam_search`] if `num_beams>1` + + You do not need to call any of the above methods directly. 
Pass custom parameter values to `generate` instead. To + learn more about decoding strategies, refer to the [text generation strategies guide](../generation_strategies). + """ + + _seed_generator = None + + @property + def seed_generator(self): + warnings.warn("`seed_generator` is deprecated and will be removed in a future version.", UserWarning) + if self._seed_generator is None: + self._seed_generator = tf.random.Generator.from_non_deterministic_state() + return self._seed_generator + + supports_xla_generation = True + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + "A model class needs to define a `prepare_inputs_for_generation` method in order to use `generate`." + ) + + def compute_transition_scores( + self, + sequences: tf.Tensor, + scores: Tuple[tf.Tensor], + beam_indices: Optional[tf.Tensor] = None, + normalize_logits: bool = False, + ) -> tf.Tensor: + """ + Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was + used). This is a convenient method to quickly obtain the scores of the selected tokens at generation time. + + Parameters: + sequences (`tf.Tensor`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or + shorter if all batches finished early due to the `eos_token_id`. + scores (`tuple(tf.Tensor)`): + Transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens. Tuple of + `tf.Tensor` with up to `max_new_tokens` elements (one element for each generated token), with each + tensor of shape `(batch_size*num_beams, config.vocab_size)`. + beam_indices (`tf.Tensor`, *optional*): + Beam indices of generated token id at each generation step. `tf.Tensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` at + generate-time. + normalize_logits (`bool`, *optional*, defaults to `False`): + Whether to normalize the logits (which, for legacy reasons, may be unnormalized). + + Return: + `tf.Tensor`: A `tf.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing + the transition scores (logits). + + Examples: + + ```python + >>> from transformers import GPT2Tokenizer, TFAutoModelForCausalLM + >>> import numpy as np + + >>> tokenizer = GPT2Tokenizer.from_pretrained("openai-community/gpt2") + >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> tokenizer.pad_token_id = tokenizer.eos_token_id + >>> inputs = tokenizer(["Today is"], return_tensors="tf") + + >>> # Example 1: Print the scores for each token generated with Greedy Search + >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True) + >>> transition_scores = model.compute_transition_scores( + ... outputs.sequences, outputs.scores, normalize_logits=True + ... ) + >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for + >>> # encoder-decoder models, like BART or T5. + >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1] + >>> generated_tokens = outputs.sequences[:, input_length:] + >>> for tok, score in zip(generated_tokens[0], transition_scores[0]): + ... # | token | token string | logits | probability + ...
print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}") + | 262 | the | -1.413 | 24.33% + | 1110 | day | -2.609 | 7.36% + | 618 | when | -2.009 | 13.41% + | 356 | we | -1.859 | 15.58% + | 460 | can | -2.508 | 8.14% + + >>> # Example 2: Reconstruct the sequence scores from Beam Search + >>> outputs = model.generate( + ... **inputs, + ... max_new_tokens=5, + ... num_beams=4, + ... num_return_sequences=4, + ... return_dict_in_generate=True, + ... output_scores=True, + ... ) + >>> transition_scores = model.compute_transition_scores( + ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False + ... ) + >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores. + >>> # Tip: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the + >>> # use case, you might want to recompute it with `normalize_logits=True`. + >>> output_length = input_length + np.sum(transition_scores.numpy() < 0, axis=1) + >>> length_penalty = model.generation_config.length_penalty + >>> reconstructed_scores = np.sum(transition_scores, axis=1) / (output_length**length_penalty) + >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores)) + True + ```""" + # 1. In the absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent + # to a beam search approach where the first (and only) beam is always selected + if beam_indices is None: + beam_indices = tf.tile(tf.expand_dims(tf.range(scores[0].shape[0]), axis=1), [1, len(scores)]) + + # 2. reshape scores as [batch_size, vocab_size, # generation steps] with # generation steps being + # seq_len - input_length + scores = tf.transpose(tf.reshape(tf.stack(scores), (len(scores), -1)), (1, 0)) + scores = tf.reshape(scores, (-1, self.config.vocab_size, scores.shape[-1])) + + # 3. Optionally normalize the logits (across the vocab dimension) + if normalize_logits: + scores = tf.nn.log_softmax(scores, axis=1) + + # 4. cut beam_indices to longest beam length + beam_indices_mask = beam_indices < 0 + max_beam_length = tf.math.reduce_max( + tf.math.reduce_sum((1 - tf.cast(beam_indices_mask, dtype=tf.int32)), axis=-1) + ) + beam_indices = beam_indices[:, -max_beam_length:] + beam_indices_mask = beam_indices_mask[:, -max_beam_length:] + + # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards + beam_indices = tf.where(beam_indices_mask, 0, beam_indices) + + # 6. Define which indices contributed to scores + cut_idx = sequences.shape[-1] - max_beam_length + token_indices = sequences[:, cut_idx:] + gen_step_idx = tf.broadcast_to(tf.range(scores.shape[-1]), token_indices.shape) + indices = tf.stack([beam_indices, token_indices, gen_step_idx], axis=-1) + + # 7. Compute scores + transition_scores = tf.gather_nd(scores, indices) + + # 8. Mask out transition_scores of beams that stopped early + transition_scores = tf.where(beam_indices_mask, 0, transition_scores) + + return transition_scores + + def _validate_model_class(self): + """ + Confirms that the model class is compatible with generation. If not, raises an exception that points to the + right class to use.
+ """ + if not self.can_generate(): + generate_compatible_mappings = [ + TF_MODEL_FOR_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_VISION_2_SEQ_MAPPING, + TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + TF_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + ] + generate_compatible_classes = set() + for model_mapping in generate_compatible_mappings: + supported_models = model_mapping.get(type(self.config), default=None) + if supported_models is not None: + generate_compatible_classes.add(supported_models.__name__) + exception_message = ( + f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " + "it doesn't have a language model head." + ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. Generate argument typos will also be caught here.""" + # Excludes arguments that are handled before calling any model function + if self.config.is_encoder_decoder: + for key in ["decoder_input_ids"]: + model_kwargs.pop(key, None) + + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) + if "kwargs" in model_args or "model_kwargs" in model_args: + model_args |= set(inspect.signature(self.call).parameters) + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + + def generate( + self, + inputs: Optional[tf.Tensor] = None, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[TFLogitsProcessorList] = None, + seed=None, + **kwargs, + ) -> Union[TFGenerateOutput, tf.Tensor]: + r""" + Generates sequences of token ids for models with a language modeling head. + + + + Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the + model's default generation configuration. You can override any `generation_config` by passing the corresponding + parameters to generate, e.g. `.generate(inputs, num_beams=4, do_sample=True)`. + + For an overview of generation strategies and code examples, check out the [following + guide](../generation_strategies). + + + + Parameters: + inputs (`tf.Tensor` of varying shape depending on the modality, *optional*): + The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the + method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` + should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of + `input_ids`, `input_values`, `input_features`, or `pixel_values`. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them.
If + `generation_config` is not provided, the default will be used, which has the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config, an error is thrown. This feature is intended for advanced users. + seed (`List[int]`, *optional*): + Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the + `seed` argument from stateless functions in `tf.random`. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder + specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. + + Return: + [`~utils.ModelOutput`] or `tf.Tensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` or when + `config.return_dict_in_generate=True`) or a `tf.Tensor`. + + If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.TFGreedySearchDecoderOnlyOutput`], + - [`~generation.TFSampleDecoderOnlyOutput`], + - [`~generation.TFBeamSearchDecoderOnlyOutput`], + - [`~generation.TFBeamSampleDecoderOnlyOutput`] + + If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.TFGreedySearchEncoderDecoderOutput`], + - [`~generation.TFSampleEncoderDecoderOutput`], + - [`~generation.TFBeamSearchEncoderDecoderOutput`], + - [`~generation.TFBeamSampleEncoderDecoderOutput`] + + """ + + # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call + self._validate_model_class() + + # priority: `generation_config` argument > `model.generation_config` (the default generation config) + if generation_config is None: + # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior, + # two conditions must be met + # 1) the generation config must have been created from the model config (`_from_model_config` field); + # 2) the generation config must have seen no modification since its creation (the hash is the same). + if self.generation_config._from_model_config and self.generation_config._original_object_hash == hash( + self.generation_config + ): + new_generation_config = GenerationConfig.from_model_config(self.config) + if new_generation_config != self.generation_config: + warnings.warn( + "You have modified the pretrained model configuration to control generation. This is a" + " deprecated strategy to control generation and will be removed soon, in a future version."
+ " Please use and modify the model generation configuration (see" + " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )" + ) + self.generation_config = new_generation_config + generation_config = self.generation_config + + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs + self._validate_model_kwargs(model_kwargs.copy()) + + # 2. Cast input dtypes to tf.int32 unless they're floats (which happens for some image models) + if inputs is not None: + if isinstance(inputs, tf.Tensor) and inputs.dtype.is_floating: + pass + elif isinstance(inputs, np.ndarray) and np.issubdtype(inputs.dtype, np.floating): + pass + else: + inputs = tf.cast(inputs, tf.int32) + if model_kwargs.get("attention_mask") is not None: + model_kwargs["attention_mask"] = tf.cast(model_kwargs["attention_mask"], tf.int32) + if "decoder_input_ids" in model_kwargs: + if ( + isinstance(model_kwargs["decoder_input_ids"], tf.Tensor) + and model_kwargs["decoder_input_ids"].dtype.is_floating + ): + pass + elif isinstance(model_kwargs["decoder_input_ids"], np.ndarray) and np.issubdtype( + model_kwargs["decoder_input_ids"].dtype, np.floating + ): + pass + else: + model_kwargs["decoder_input_ids"] = tf.cast(model_kwargs["decoder_input_ids"], tf.int32) + + # 3. Set generation parameters if not already defined + logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() + + if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: + if model_kwargs.get("attention_mask") is None: + logger.warning( + "The attention mask and the pad token id were not set. As a consequence, you may observe " + "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." + ) + eos_token_id = generation_config.eos_token_id + if isinstance(eos_token_id, list): + eos_token_id = eos_token_id[0] + logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") + generation_config.pad_token_id = eos_token_id + + use_xla = not tf.executing_eagerly() + if use_xla and not self.supports_xla_generation: + raise ValueError( + "The selected model does not support Graph mode nor XLA generation (e.g. from tf.function())" + ) + + # 4. Define model inputs + inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( + inputs, generation_config.bos_token_id, model_kwargs + ) + # inputs_ids now has to be defined and cannot be None anymore + batch_size = shape_list(inputs_tensor)[0] + + # 5. 
Prepare other model kwargs + model_kwargs["output_attentions"] = generation_config.output_attentions + model_kwargs["output_hidden_states"] = generation_config.output_hidden_states + model_kwargs["use_cache"] = generation_config.use_cache + + accepts_attention_mask = "attention_mask" in set(inspect.signature(self.call).parameters.keys()) + requires_attention_mask = "encoder_outputs" not in model_kwargs + + if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: + model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( + inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id + ) + + # decoder-only models should use left-padding for generation + if not self.config.is_encoder_decoder: + if generation_config.pad_token_id is not None and tf.math.reduce_any( + inputs_tensor[:, -1] == generation_config.pad_token_id + ): + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." + ) + if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: + # if model is encoder-decoder, encoder_outputs are created and added to `model_kwargs` + model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( + inputs_tensor, model_kwargs, model_input_name + ) + + # 6. Prepare model inputs which will be used for auto-regressive generation + if self.config.is_encoder_decoder: + input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( + batch_size=batch_size, + model_input_name=model_input_name, + model_kwargs=model_kwargs, + decoder_start_token_id=generation_config.decoder_start_token_id, + bos_token_id=generation_config.bos_token_id, + ) + else: + input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids") + + # 7. Prepare `max_length` depending on other stopping criteria. + input_ids_seq_length = shape_list(input_ids)[-1] + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: + # 20 is the default max_length of the generation config + warnings.warn( + f"Using the model-agnostic default `max_length` (={generation_config.max_length}) " + "to control the generation length. We recommend setting `max_new_tokens` to control the maximum length of the generation.", + UserWarning, + ) + elif generation_config.max_new_tokens is not None: + if not has_default_max_length and generation_config.max_length is not None: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length` (=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.max_length = generation_config.max_new_tokens + input_ids_seq_length + + # If the input length is a tensor (i.e.
dynamic length), skip length checks + if not isinstance(input_ids_seq_length, tf.Tensor): + if ( + generation_config.min_length is not None + and generation_config.min_length > generation_config.max_length + ): + raise ValueError( + f"Unfeasible length constraints: the minimum length ({generation_config.min_length}) is larger" + f" than the maximum length ({generation_config.max_length})" + ) + if input_ids_seq_length >= generation_config.max_length: + input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" + logger.warning( + f"Input length of {input_ids_string} is {input_ids_seq_length}, but `max_length` is set to" + f" {generation_config.max_length}. This can lead to unexpected behavior. You should consider" + " increasing `max_new_tokens`." + ) + + # 8. determine generation mode + is_contrastive_search_gen_mode = ( + generation_config.top_k is not None + and generation_config.top_k > 1 + and generation_config.do_sample is False + and generation_config.penalty_alpha is not None + and generation_config.penalty_alpha > 0 + ) + is_greedy_gen_mode = ( + not is_contrastive_search_gen_mode + and (generation_config.num_beams == 1) + and generation_config.do_sample is False + ) + is_beam_gen_mode = ( + not is_contrastive_search_gen_mode + and (generation_config.num_beams > 1) + and generation_config.do_sample is False + ) + is_sample_gen_mode = (generation_config.num_beams == 1) and generation_config.do_sample is True + is_beam_sample_gen_mode = (generation_config.num_beams > 1) and generation_config.do_sample is True + + # 9. prepare distribution pre_processing samplers + logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_seq_length, + logits_processor=logits_processor, + ) + + # 10. go into different generation modes + if is_greedy_gen_mode: + if generation_config.num_return_sequences > 1: + raise ValueError( + f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" + " greedy search." + ) + # 11. run greedy search + return self.greedy_search( + input_ids, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + logits_processor=logits_processor, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + elif is_contrastive_search_gen_mode: + if generation_config.num_return_sequences > 1: + raise ValueError( + f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing" + " contrastive search." + ) + # 11. run contrastive search + return self.contrastive_search( + input_ids, + top_k=generation_config.top_k, + penalty_alpha=generation_config.penalty_alpha, + logits_processor=logits_processor, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + elif is_sample_gen_mode: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config=generation_config) + + # 12.
expand input_ids with `num_return_sequences` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_return_sequences, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 13. run sample + return self.sample( + input_ids, + logits_processor=logits_processor, + logits_warper=logits_warper, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + seed=seed, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + **model_kwargs, + ) + + elif is_beam_gen_mode: + if generation_config.num_beams < generation_config.num_return_sequences: + raise ValueError( + "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" + f" num_return_sequences, got {generation_config.num_beams} and" + f" {generation_config.num_return_sequences} (respectively)" + ) + + # 11. broadcast inputs to the desired number of beams + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + expand_in_new_axis=True, + **model_kwargs, + ) + + # 12. run beam search + return self.beam_search( + input_ids, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + length_penalty=generation_config.length_penalty, + early_stopping=generation_config.early_stopping, + logits_processor=logits_processor, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + num_return_sequences=generation_config.num_return_sequences, + **model_kwargs, + ) + + elif is_beam_sample_gen_mode: + if generation_config.num_beams < generation_config.num_return_sequences: + raise ValueError( + "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >=" + f" num_return_sequences, got {generation_config.num_beams} and" + f" {generation_config.num_return_sequences} (respectively)" + ) + + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config=generation_config) + + # 12. broadcast inputs to the desired number of beams + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + expand_in_new_axis=True, + **model_kwargs, + ) + + # 13.
run beam sample (beam search with sampling) + return self.beam_search( + input_ids, + do_sample=True, + max_length=generation_config.max_length, + pad_token_id=generation_config.pad_token_id, + eos_token_id=generation_config.eos_token_id, + length_penalty=generation_config.length_penalty, + early_stopping=generation_config.early_stopping, + logits_processor=logits_processor, + logits_warper=logits_warper, + output_scores=generation_config.output_scores, + return_dict_in_generate=generation_config.return_dict_in_generate, + num_return_sequences=generation_config.num_return_sequences, + **model_kwargs, + ) + + def _prepare_attention_mask_for_generation( + self, + inputs: tf.Tensor, + pad_token_id: Optional[int], + eos_token_id: Optional[int], + ) -> tf.Tensor: + is_input_ids = len(inputs.shape) == 2 and inputs.dtype in (tf.int32, tf.int64) + is_pad_token_in_inputs = (pad_token_id is not None) and tf.math.reduce_any(inputs == pad_token_id) + is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id != eos_token_id) + + # Check if input is input_ids and padded -> only then is attention_mask defined + if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: + return tf.cast(tf.math.not_equal(inputs, pad_token_id), dtype=tf.int32) + else: + return tf.ones(inputs.shape[:2], dtype=tf.int32) + + def _prepare_encoder_decoder_kwargs_for_generation( + self, inputs_tensor: tf.Tensor, model_kwargs, model_input_name: Optional[str] = None + ) -> Dict[str, Any]: + # 1. get encoder and store encoder outputs + encoder = self.get_encoder() + + # 2. prepare encoder args and encoder kwargs from model kwargs + irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not any(argument.startswith(p) for p in irrelevant_prefix) + } + encoder_signature = set(inspect.signature(encoder.call).parameters) + encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature + if not encoder_accepts_wildcard: + encoder_kwargs = { + argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature + } + + # 3. vision models don't use `attention_mask`. + encoder_kwargs["return_dict"] = True + encoder_kwargs[model_input_name] = inputs_tensor + if model_input_name != self.main_input_name: # in Keras, the first input must always be passed + encoder_kwargs[self.main_input_name] = None + encoder_outputs = encoder(**encoder_kwargs) + model_kwargs["encoder_outputs"] = encoder_outputs + + return model_kwargs + + def _prepare_decoder_input_ids_for_generation( + self, + batch_size: int, + model_input_name: str, + model_kwargs: Dict[str, tf.Tensor], + decoder_start_token_id: int = None, + bos_token_id: int = None, + ) -> Tuple[tf.Tensor, Dict[str, tf.Tensor]]: + """Prepares `decoder_input_ids` for generation with encoder-decoder models""" + # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, + # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. + if model_kwargs is not None and "decoder_input_ids" in model_kwargs: + decoder_input_ids = model_kwargs.pop("decoder_input_ids") + elif "input_ids" in model_kwargs and model_input_name != "input_ids": + decoder_input_ids = model_kwargs.pop("input_ids") + else: + decoder_input_ids = None + + # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. 
Let's ensure that. + decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + decoder_input_ids_start = tf.ones((batch_size, 1), dtype=tf.int32) * decoder_start_token_id + + # no user input -> use decoder_start_token_id as decoder_input_ids + if decoder_input_ids is None: + decoder_input_ids = decoder_input_ids_start + # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust + # decoder_attention_mask if provided) + elif tf.reduce_all(decoder_input_ids[:, 0] != decoder_start_token_id): + decoder_input_ids = tf.concat([decoder_input_ids_start, decoder_input_ids], axis=-1) + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + decoder_attention_mask = tf.concat( + (tf.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), + axis=-1, + ) + model_kwargs["decoder_attention_mask"] = decoder_attention_mask + + return decoder_input_ids, model_kwargs + + def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: + # retrieve decoder_start_token_id for encoder-decoder models + # fall back to bos_token_id if necessary + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id is not None + else self.generation_config.decoder_start_token_id + ) + bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id + + if decoder_start_token_id is not None: + return decoder_start_token_id + elif bos_token_id is not None: + return bos_token_id + raise ValueError( + "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." + ) + + @staticmethod + def _expand_inputs_for_generation( + expand_size: int = 1, + is_encoder_decoder: bool = False, + input_ids: Optional[tf.Tensor] = None, + expand_in_new_axis: bool = False, + **model_kwargs, + ) -> Tuple[tf.Tensor, Dict[str, Any]]: + """ + Expands tensors from [batch_size, ...] to [batch_size * expand_size, ...] or [batch_size, expand_size, ...], + depending on `expand_in_new_axis`. Beam-based approaches expect this function to be used with + `expand_in_new_axis=True` + """ + + def _expand_tensor(tensor: tf.Tensor): + if expand_in_new_axis: + shape = shape_list(tensor) + return tf.broadcast_to(tensor[:, None], (shape[0], expand_size) + tuple(shape[1:])) + else: + return tf.repeat(tensor, expand_size, axis=0) + + def _expand_dict_for_generation(dict_to_expand): + for key in dict_to_expand: + if dict_to_expand[key] is not None and isinstance(dict_to_expand[key], tf.Tensor): + dict_to_expand[key] = _expand_tensor(dict_to_expand[key]) + return dict_to_expand + + if input_ids is not None: + input_ids = _expand_tensor(input_ids) + + model_kwargs = _expand_dict_for_generation(model_kwargs) + + if is_encoder_decoder: + if model_kwargs.get("encoder_outputs") is None: + raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") + model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) + + return input_ids, model_kwargs + + def _prepare_model_inputs( + self, + inputs: Optional[tf.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, tf.Tensor]] = None, + ) -> Tuple[tf.Tensor, Optional[str], Dict[str, tf.Tensor]]: + """ + This function extracts the model-specific `inputs` for generation. + """ + # 1. retrieve all kwargs that are non-None or non-model input related. 
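+ # (Editor's note: illustrative sketch, not part of the upstream source; assumes a text model whose main input is `input_ids`.) + # For a hypothetical call like `model.generate(attention_mask=mask, input_ids=None)`, the dict comprehension below + # keeps `attention_mask` but drops the `None`-valued `input_ids` entry: only the resolved `input_name` key is + # filtered on `None`, while every other kwarg is kept so it can still be validated and forwarded later.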
+ # some encoder-decoder models have different names for model and encoder + if ( + self.config.is_encoder_decoder + and hasattr(self, "encoder") + and hasattr(self.encoder, "main_input_name") + and self.encoder.main_input_name != self.main_input_name + ): + input_name = self.encoder.main_input_name + else: + input_name = self.main_input_name + + model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} + + # 2. check whether model_input_name is passed as kwarg + # if yes and `inputs` is None use kwarg inputs + inputs_kwarg = model_kwargs.pop(input_name, None) + if inputs_kwarg is not None and inputs is not None: + raise ValueError( + f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. " + f"Make sure to either pass {inputs} or {input_name}=..." + ) + elif inputs_kwarg is not None: + inputs = inputs_kwarg + + # 3. In the presence of `inputs_embeds` for text models: + # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model + # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with + # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`) + # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and + # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states. + if input_name == "input_ids" and "inputs_embeds" in model_kwargs: + if not self.config.is_encoder_decoder: + has_inputs_embeds_forwarding = "inputs_embeds" in set( + inspect.signature(self.prepare_inputs_for_generation).parameters.keys() + ) + if not has_inputs_embeds_forwarding: + raise ValueError( + f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} " + "doesn't have its forwarding implemented. See the GPT2 implementation for an example " + "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!" + ) + # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of + # the attention mask) can rely on the actual model input. + model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( + inputs, bos_token_id, model_kwargs=model_kwargs + ) + else: + if inputs is not None: + raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.") + inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" + + # 4. 
if `inputs` is still None, try to create `input_ids` from BOS token + inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) + + return inputs, input_name, model_kwargs + + def _maybe_initialize_input_ids_for_generation( + self, + inputs: Optional[tf.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, tf.Tensor]] = None, + ) -> tf.Tensor: + """Initializes input ids for generation, if necessary.""" + if inputs is not None: + return inputs + + encoder_outputs = model_kwargs.get("encoder_outputs") + if self.config.is_encoder_decoder and encoder_outputs is not None: + # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding + shape = encoder_outputs.last_hidden_state.shape[:-1] + return tf.ones(shape, dtype=tf.int32) * -100 + + if bos_token_id is None: + raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") + + # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with + # soft-prompting or in multimodal implementations built on top of decoder-only language models. + batch_size = 1 + for value in model_kwargs.values(): + if isinstance(value, tf.Tensor): + batch_size = value.shape[0] + break + return tf.ones((batch_size, 1), dtype=tf.int32) * bos_token_id + + @staticmethod + def _extract_past_from_model_output(outputs: ModelOutput): + past_key_values = None + if "past_key_values" in outputs: + past_key_values = outputs.past_key_values + elif "mems" in outputs: + past_key_values = outputs.mems + elif "past_buckets_states" in outputs: + past_key_values = outputs.past_buckets_states + return past_key_values + + def _update_model_kwargs_for_generation( + self, outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False + ) -> Dict[str, Any]: + # update past_key_values + model_kwargs["past_key_values"] = self._extract_past_from_model_output(outputs) + + # update attention mask + if not is_encoder_decoder: + if "attention_mask" in model_kwargs: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = tf.concat( + [attention_mask, tf.ones((shape_list(attention_mask)[0], 1), dtype=tf.int32)], axis=-1 + ) + + return model_kwargs + + def _update_model_kwargs_for_xla_generation( + self, + model_outputs: ModelOutput, + model_kwargs: Dict[str, Any], + cur_len: int, + max_length: int, + batch_size: int, + is_encoder_decoder: bool = False, + batch_axis: int = 0, + ): + def _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder): + """initializes the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" + if is_encoder_decoder: + # One 1 for decoder_start_token_id, 0s for the currently-unfilled locations in the past_key_values tensor, + # 1s for the actual input_ids + decoder_attention_mask = tf.concat( + [ + tf.ones((batch_size, 1), dtype=tf.int32), + tf.zeros((batch_size, num_padding_values), dtype=tf.int32), + tf.ones((batch_size, 1), dtype=tf.int32), + ], + axis=1, + ) + mask = {"decoder_attention_mask": decoder_attention_mask} + else: + attention_mask = model_kwargs.pop("attention_mask") + # 0s for the currently-unfilled locations in the past_key_values tensor, 1s for the actual input_ids + attention_mask = tf.concat( + [ + attention_mask, + tf.zeros((batch_size, num_padding_values), dtype=attention_mask.dtype), + tf.ones((batch_size, 1), dtype=attention_mask.dtype), + ], + axis=1, + ) + mask = 
{"attention_mask": attention_mask} + return mask + + def _update_attention(model_kwargs, new_past_index, is_encoder_decoder): + """updates the appropriate attention mask -- encoder-decoder models use `decoder_attention_mask`""" + update_start = tf.constant([0, 1], dtype=tf.int32) * new_past_index + if is_encoder_decoder: + decoder_attention_mask = model_kwargs.pop("decoder_attention_mask") + decoder_attention_mask_update_slice = tf.ones((batch_size, 1), dtype=decoder_attention_mask.dtype) + decoder_attention_mask = dynamic_update_slice( + decoder_attention_mask, decoder_attention_mask_update_slice, update_start + ) + mask = {"decoder_attention_mask": decoder_attention_mask} + else: + attention_mask = model_kwargs.pop("attention_mask") + attention_mask_update_slice = tf.ones((batch_size, 1), dtype=attention_mask.dtype) + attention_mask = dynamic_update_slice(attention_mask, attention_mask_update_slice, update_start) + mask = {"attention_mask": attention_mask} + return mask + + def _initialize_past(past_key_values, num_padding_values, batch_axis): + """initialize past_key_values with zeros -- the structure depends on `batch_axis`""" + if batch_axis == 0: + padding_values = tf.constant([[0, 0], [0, 0], [0, num_padding_values], [0, 0]], dtype=tf.int32) + new_past = () + for past_layer in past_key_values: + new_past_layer = list(past_layer) + for i in range(len(new_past_layer[:2])): + new_past_layer[i] = tf.pad(past_layer[i], padding_values) + new_past += (tuple(new_past_layer),) + else: + padding_values = tf.scatter_nd(indices=[[3, 1]], updates=[num_padding_values], shape=(5, 2)) + new_past = list(past_key_values) + for i in range(len(past_key_values)): + new_past[i] = tf.pad(past_key_values[i], padding_values) + return new_past + + def _update_past(past_key_values, new_past_index, batch_axis): + if batch_axis == 0: + slice_start_base = tf.constant([0, 0, 1, 0]) + new_past = () + for past_layer in past_key_values: + new_past_layer = list(past_layer) + for i in range(len(new_past_layer[:2])): + update_slice = past_layer[i][:, :, -1:] + # Write the last slice to the first open location in the padded past_key_values array + # and then truncate the last slice off the array + new_past_layer[i] = dynamic_update_slice( + past_layer[i][:, :, :-1], update_slice, slice_start_base * new_past_index + ) + new_past += (tuple(new_past_layer),) + else: + slice_start_base = tf.constant([0, 0, 0, 1, 0]) + new_past = [None for _ in range(len(past_key_values))] + for i in range(len(past_key_values)): + update_slice = past_key_values[i][:, :, :, -1:] + # Write the last slice to the first open location in the padded past_key_values array + # and then truncate the last slice off the array + new_past[i] = dynamic_update_slice( + past_key_values[i][:, :, :, :-1], update_slice, slice_start_base * new_past_index + ) + return new_past + + past_key_values = self._extract_past_from_model_output(model_outputs) + if past_key_values is None: + raise ValueError( + "No known `past_key_values variable` found in model outputs (model outputs keys:" + f" {list(model_outputs.keys())})" + ) + is_past_initialized = model_kwargs.pop("past_key_values", None) is not None + + if not is_past_initialized: + # The padded version of `past_key_values` has a length of `max_length - 1`, as `past_key_values` holds information relative to + # previous autoregressive generation steps (step 0 has no past_key_values, step 1 has 1 past_key_values value, ..., the last step + # has `max_length - 1` past_key_values values). 
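+ # (Editor's note: illustrative arithmetic, not part of the upstream source; assumes e.g. max_length=8 and cur_len=3.) + # After the first forward pass the cache already covers the 3 existing positions, so it is padded with + # 8 - 3 - 1 = 4 zero-filled slots, giving the fixed size of max_length - 1 = 7 that XLA needs for shape-stable updates.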
+ num_padding_values = max_length - cur_len - 1 + mask = _initialize_attention(model_kwargs, num_padding_values, is_encoder_decoder) + new_past = _initialize_past(past_key_values, num_padding_values, batch_axis) + else: + # The new index of past_key_values to be filled corresponds to the current length of the sequence, with two + # subtractions: -1 because past_key_values holds information regarding previous generation steps (read comment above) + # and -1 again because in an array the index is the length of the array minus 1. + new_past_index = cur_len - 2 + mask = _update_attention(model_kwargs, new_past_index, is_encoder_decoder) + new_past = _update_past(past_key_values, new_past_index, batch_axis) + + # sets the updated variables (mask and past_key_values) + model_kwargs.update(mask) + model_kwargs["past_key_values"] = tuple(new_past) + + return model_kwargs + + def _get_logits_warper( + self, + generation_config: GenerationConfig, + ) -> TFLogitsProcessorList: + """ + This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsWarper`] + instances used for multinomial sampling. + """ + + # instantiate warpers list + warpers = TFLogitsProcessorList() + + # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a + # better score (i.e. keep len(generation_config.eos_token_id) + 1) + if generation_config.num_beams > 1: + if isinstance(generation_config.eos_token_id, list): + min_tokens_to_keep = len(generation_config.eos_token_id) + 1 + else: + min_tokens_to_keep = 2 + else: + min_tokens_to_keep = 1 + + if generation_config.temperature is not None and generation_config.temperature != 1.0: + warpers.append(TFTemperatureLogitsWarper(generation_config.temperature)) + if generation_config.top_k is not None and generation_config.top_k != 0: + warpers.append(TFTopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep)) + if generation_config.top_p is not None and generation_config.top_p < 1.0: + warpers.append(TFTopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep)) + return warpers + + def _get_logits_processor( + self, + generation_config: GenerationConfig, + input_ids_seq_length: int, + logits_processor: Optional[TFLogitsProcessorList], + ) -> TFLogitsProcessorList: + """ + This class returns a [`TFLogitsProcessorList`] list object that contains all relevant [`TFLogitsProcessor`] + instances used to modify the scores of the language model head. 
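+ (Illustrative example added by the editor, not part of the upstream docstring: with a hypothetical + `generation_config.repetition_penalty=1.2`, the body below appends `TFRepetitionPenaltyLogitsProcessor(penalty=1.2)` + to the returned list, and any user-supplied `logits_processor` is merged in at the end via + `_merge_criteria_processor_list`.)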
+ """ + processors = TFLogitsProcessorList() + + # instantiate processors list + if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: + processors.append(TFRepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) + if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: + processors.append(TFNoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) + if generation_config.bad_words_ids is not None: + processors.append( + TFNoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) + ) + if ( + generation_config.min_length is not None + and generation_config.eos_token_id is not None + and generation_config.min_length > 0 + ): + processors.append(TFMinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) + if generation_config.forced_bos_token_id is not None: + processors.append(TFForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) + if generation_config.forced_eos_token_id is not None: + processors.append( + TFForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) + ) + if generation_config.suppress_tokens is not None: + processors.append(TFSuppressTokensLogitsProcessor(generation_config.suppress_tokens)) + if generation_config.begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = ( + begin_index + if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) + else begin_index + 1 + ) + if generation_config.forced_decoder_ids is not None: + begin_index += generation_config.forced_decoder_ids[-1][ + 0 + ] # generation starts after the last token that is forced + processors.append( + TFSuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) + ) + if generation_config.forced_decoder_ids is not None: + processors.append(TFForceTokensLogitsProcessor(generation_config.forced_decoder_ids)) + + processors = self._merge_criteria_processor_list(processors, logits_processor) + return processors + + def _merge_criteria_processor_list( + self, + default_list: TFLogitsProcessorList, + custom_list: TFLogitsProcessorList, + ) -> TFLogitsProcessorList: + if len(custom_list) == 0: + return default_list + for default in default_list: + for custom in custom_list: + if type(custom) is type(default): + object_type = "logits processor" + raise ValueError( + f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" + f" `generate`, but it has already been created with the values {default}. {default} has been" + " created by passing the corresponding arguments to generate or by the model's config default" + f" values. If you just want to change the default values of {object_type} consider passing" + f" them as arguments to `generate` instead of using a custom {object_type}." 
+ ) + default_list.extend(custom_list) + return default_list + + def greedy_search( + self, + input_ids: tf.Tensor, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + logits_processor: Optional[TFLogitsProcessorList] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + **model_kwargs, + ) -> Union[TFGreedySearchOutput, tf.Tensor]: + r""" + Generates sequences for models with a language modeling head using greedy decoding. + + Parameters: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + max_length (`int`, *optional*, defaults to 20): + The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `call` function of the model. If + model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.TFGreedySearchDecoderOnlyOutput`], [`~generation.TFGreedySearchEncoderDecoderOutput`] or + `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a + [`~generation.TFGreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.TFGreedySearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... TFAutoModelForCausalLM, + ... TFLogitsProcessorList, + ... TFMinLengthLogitsProcessor, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") + + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + + >>> input_prompt = "Today is a beautiful day, and" + >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids + + >>> # instantiate logits processors + >>> logits_processor = TFLogitsProcessorList( + ... [ + ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... 
) + + >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ["Today is a beautiful day, and I'm so happy to be here. I'm so happy to"] + ```""" + + # 1. init greedy_search values + logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() + + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) + use_xla = not tf.executing_eagerly() + # TODO (Joao): fix cache format or find programatic way to detect cache index + # GPT2 and other models has a slightly different cache structure, with a different batch axis + model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) + cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 + # some models, like XLNet, need more than the last token in the presence of past_key_values + needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) + + # 2. init `attentions`, `hidden_states`, and `scores` tuples + scores = [] if (return_dict_in_generate and output_scores) else None + decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None + cross_attentions = [] if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None + + # 3. init tensors to use for "xla-compileable" generate function + batch_size, cur_len = shape_list(input_ids) + + # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` + input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) + generated = tf.concat([input_ids, input_ids_padding], axis=-1) + finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) + + # 4. 
define "xla-compile-able" stop-condition and auto-regressive function + # define condition fn + def greedy_search_cond_fn(generated, finished_sequences, cur_len, model_kwargs): + """state termination condition fn.""" + return ~tf.reduce_all(finished_sequences) + + # define condition fn + def greedy_search_body_fn(generated, finished_sequences, cur_len, model_kwargs): + """state update fn.""" + if model_kwargs.get("past_key_values") is None or needs_full_input: + input_ids = generated[:, :cur_len] + else: + input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) + model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) + # forward pass to get next token logits + model_outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + next_token_logits = model_outputs.logits[:, -1] + + # pre-process distribution + next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) + + # Store scores, attentions and hidden_states when required + if not use_xla and return_dict_in_generate: + if output_scores: + scores.append(next_tokens_scores) + if output_attentions and self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.decoder_attentions) + elif output_attentions and not self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.attentions) + if self.config.is_encoder_decoder: + cross_attentions.append(model_outputs.cross_attentions) + + if output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.decoder_hidden_states) + elif output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.hidden_states) + + # argmax + next_tokens = tf.argmax(next_tokens_scores, axis=-1, output_type=tf.int32) + + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + cur_len += 1 + + # update model_kwargs + if use_xla: + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=model_outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=batch_size, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + # if we don't cache past_key_values key values we need the whole input + if model_kwargs.get("past_key_values", None) is None: + # let's throw out `past_key_values` since we don't want `None` tensors + model_kwargs.pop("past_key_values", None) + + return generated, finished_sequences, cur_len, model_kwargs + + # 5. 
run generation + # 1st generation step has to be run before to initialize `past_key_values` + generated, finished_sequences, cur_len, model_kwargs = greedy_search_body_fn( + generated, finished_sequences, cur_len, model_kwargs + ) + + # 2-to-n generation steps can then be run in autoregressive fashion + # only in case 1st generation step does NOT yield EOS token though + maximum_iterations = max_length - cur_len + generated, _, cur_len, _ = tf.while_loop( + greedy_search_cond_fn, + greedy_search_body_fn, + (generated, finished_sequences, cur_len, model_kwargs), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + if not use_xla: + # cut for backward compatibility + generated = generated[:, :cur_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights + # and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + scores = tuple(scores) if scores is not None else None + decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None + cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None + decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None + + return TFGreedySearchEncoderDecoderOutput( + sequences=generated, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + return TFGreedySearchDecoderOnlyOutput( + sequences=generated, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return generated + + def sample( + self, + input_ids: tf.Tensor, + logits_processor: Optional[TFLogitsProcessorList] = None, + logits_warper: Optional[TFLogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + seed: Optional[Tuple[int, int]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + **model_kwargs, + ) -> Union[TFSampleOutput, tf.Tensor]: + r""" + Generates sequences for models with a language modeling head using multinomial sampling. + + Parameters: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] + used to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. 
+ eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + seed (`List[int]`, *optional*): + Random seed to control sampling, containing two integers, used when `do_sample` is `True`. See the + `seed` argument from stateless functions in `tf.random`. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + model_kwargs: + Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an + encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.TFSampleDecoderOnlyOutput`], [`~generation.TFSampleEncoderDecoderOutput`] or `tf.Tensor`: A + `tf.Tensor` containing the generated tokens (default behaviour) or a + [`~generation.TFSampleDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.TFSampleEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> import tensorflow as tf + >>> from transformers import ( + ... AutoTokenizer, + ... TFAutoModelForCausalLM, + ... TFLogitsProcessorList, + ... TFMinLengthLogitsProcessor, + ... TFTopKLogitsWarper, + ... TFTemperatureLogitsWarper, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = TFAutoModelForCausalLM.from_pretrained("openai-community/gpt2") + + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + + >>> input_prompt = "Today is a beautiful day, and" + >>> input_ids = tokenizer(input_prompt, return_tensors="tf").input_ids + + >>> # instantiate logits processors + >>> logits_processor = TFLogitsProcessorList( + ... [ + ... TFMinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... ) + >>> # instantiate logits warpers + >>> logits_warper = TFLogitsProcessorList( + ... [ + ... TFTopKLogitsWarper(50), + ... TFTemperatureLogitsWarper(0.7), + ... ] + ... ) + + >>> tf.random.set_seed(0) + >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Today is a beautiful day, and I love my country. But when I look at Donald Trump,'] + ```""" + + # 1. 
init greedy_search values + logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() + + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) + use_xla = not tf.executing_eagerly() + # TODO (Joao): fix cache format or find programatic way to detect cache index + # GPT2 and other models has a slightly different cache structure, with a different batch axis + model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) + cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 + # some models, like XLNet, need more than the last token in the presence of past_key_values + needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) + + # 2. init `attentions`, `hidden_states`, and `scores` tuples + scores = [] if (return_dict_in_generate and output_scores) else None + decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None + cross_attentions = [] if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None + + # 3. init tensors to use for "xla-compileable" generate function + batch_size, cur_len = shape_list(input_ids) + + # initialize `generated` (pre-populated with `pad_token_id`), `finished_sequences` + input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) + generated = tf.concat([input_ids, input_ids_padding], axis=-1) + finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) + + # 4. 
define "xla-compile-able" stop-condition and auto-regressive function + def sample_cond_fn(generated, finished_sequences, cur_len, model_kwargs): + return ~tf.reduce_all(finished_sequences) + + def sample_body_fn(generated, finished_sequences, cur_len, model_kwargs): + if model_kwargs.get("past_key_values") is None or needs_full_input: + input_ids = generated[:, :cur_len] + else: + input_ids = tf.expand_dims(generated[:, cur_len - 1], -1) + model_inputs = self.prepare_inputs_for_generation(input_ids, use_cache=use_cache, **model_kwargs) + # forward pass to get next token logits + model_outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + next_token_logits = model_outputs.logits[:, -1] + + # pre-process distribution + next_tokens_scores = logits_processor(generated, next_token_logits, cur_len) + next_tokens_scores = logits_warper(generated, next_tokens_scores, cur_len) + + # Store scores, attentions and hidden_states when required + if not use_xla and return_dict_in_generate: + if output_scores: + scores.append(next_tokens_scores) + if output_attentions and self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.decoder_attentions) + elif output_attentions and not self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.attentions) + if self.config.is_encoder_decoder: + cross_attentions.append(model_outputs.cross_attentions) + + if output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.decoder_hidden_states) + elif output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.hidden_states) + + # sample + if seed is not None: + sample_seed = seed + else: + sample_seed = tf.experimental.numpy.random.randint(tf.int32.min, tf.int32.max, (2,), dtype=tf.int32) + next_tokens = tf.squeeze( + tf.random.stateless_categorical( + logits=next_tokens_scores, num_samples=1, seed=sample_seed, dtype=tf.int32 + ), + axis=1, + ) + + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + cur_len += 1 + + # update model_kwargs + if use_xla: + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=model_outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=batch_size, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + # if we don't cache past_key_values key values we need the whole input + if model_kwargs.get("past_key_values", None) is None: + # let's throw out `past_key_values` since we don't want `None` tensors + 
model_kwargs.pop("past_key_values", None) + + return generated, finished_sequences, cur_len, model_kwargs + + # 5. run generation + # 1st generation step has to be run before to initialize `past_key_values` + generated, finished_sequences, cur_len, model_kwargs = sample_body_fn( + generated, finished_sequences, cur_len, model_kwargs + ) + + # 2-to-n generation steps can then be run in autoregressive fashion + # only in case 1st generation step does NOT yield EOS token though + maximum_iterations = max_length - cur_len + generated, _, cur_len, _ = tf.while_loop( + sample_cond_fn, + sample_body_fn, + (generated, finished_sequences, cur_len, model_kwargs), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + if not use_xla: + # cut for backward compatibility + generated = generated[:, :cur_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights + # and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + scores = tuple(scores) if scores is not None else None + decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None + cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None + decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None + + return TFSampleEncoderDecoderOutput( + sequences=generated, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + return TFSampleDecoderOnlyOutput( + sequences=generated, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return generated + + @staticmethod + def _gather_beams(nested, beam_indices, batch_axis=0): + """Gathers the beam slices indexed by beam_indices into new beam array.""" + + def gather_fn(tensor): + if batch_axis > 0: + # pushes all dimentions before the batch to the end, so we get (batch, beam_id, ...) 
+ perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0) + tensor = tf.transpose(tensor, perm=perm) + + gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1) + if batch_axis > 0: + # transposes back to the original dimensions + perm = tf.concat((tf.range(tf.rank(tensor))[batch_axis:], tf.range(batch_axis)), axis=0) + perm = tf.math.invert_permutation(perm) + gathered_tensor = tf.transpose(gathered_tensor, perm=perm) + + return gathered_tensor + + return tf.nest.map_structure(gather_fn, nested) + + def beam_search( + self, + input_ids: tf.Tensor, + do_sample: bool = False, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + length_penalty: Optional[float] = None, + early_stopping: Optional[Union[bool, str]] = None, + logits_processor: Optional[TFLogitsProcessorList] = None, + logits_warper: Optional[TFLogitsProcessorList] = None, + num_return_sequences: Optional[int] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + **model_kwargs, + ) -> Union[TFBeamSearchOutput, TFBeamSampleOutput, tf.Tensor]: + r""" + Generates sequences for models with a language modeling head using beam search. If `do_sample` is `False`, uses + a greedy approach, otherwise does multinomial sampling without replacement. + + Parameters: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + do_sample (`bool`, *optional*, defaults to `False`): + Whether or not to use sampling; use greedy decoding otherwise. + max_length (`int`, *optional*, defaults to 20): + The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + length_penalty (`float`, *optional*, defaults to 1.0): + Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent + to the sequence length, which in turn is used to divide the score of the sequence. Since the score is + the log likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, + while `length_penalty` < 0.0 encourages shorter sequences. + early_stopping (`bool` or `str`, *optional*, defaults to `False`): + Controls the stopping condition for beam-based methods, like beam-search. It accepts the following + values: `True`, where the generation stops as soon as there are `num_beams` complete candidates; + `False`, where a heuristic is applied and the generation stops when it is very unlikely to find better + candidates; `"never"`, where the beam search procedure only stops when there cannot be better + candidates (canonical beam search algorithm). + logits_processor (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. 
List of instances of class derived from [`TFLogitsWarper`] + used to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + num_return_sequences(`int`, *optional*, defaults to 1): + The number of independently computed returned sequences for each element in the batch. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. + model_kwargs: + Additional model specific kwargs will be forwarded to the `call` function of the model. If model is an + encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.TFBeamSearchDecoderOnlyOutput`], [`~generation.TFBeamSearchEncoderDecoderOutput`] or + `tf.Tensor`: A `tf.Tensor` containing the generated tokens (default behaviour) or a + [`~generation.TFBeamSearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.TFBeamSearchEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... TFAutoModelForSeq2SeqLM, + ... TFLogitsProcessorList, + ... TFMinLengthLogitsProcessor, + ... ) + >>> import tensorflow as tf + + >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") + >>> model = TFAutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="tf").input_ids + + >>> # lets run beam search using 3 beams + >>> num_beams = 3 + >>> # define decoder start token ids + >>> input_ids = tf.ones((1, num_beams, 1), dtype=tf.int32) + >>> input_ids = input_ids * model.generation_config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> encoder_outputs = model.get_encoder()(encoder_input_ids, return_dict=True) + >>> encoder_outputs.last_hidden_state = tf.repeat( + ... tf.expand_dims(encoder_outputs.last_hidden_state, axis=0), num_beams, axis=1 + ... ) + >>> model_kwargs = {"encoder_outputs": encoder_outputs} + + >>> # instantiate logits processors + >>> logits_processor = TFLogitsProcessorList( + ... [TFMinLengthLogitsProcessor(5, eos_token_id=model.generation_config.eos_token_id)] + ... ) + + >>> outputs = model.beam_search(input_ids, logits_processor=logits_processor, **model_kwargs) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt bist du?'] + ```""" + + def flatten_beam_dim(tensor, batch_axis=0): + """Flattens the first two dimensions of a non-scalar array.""" + shape = shape_list(tensor) + return tf.reshape( + tensor, + shape[:batch_axis] + [shape[batch_axis] * shape[batch_axis + 1]] + shape[batch_axis + 2 :], + ) + + def unflatten_beam_dim(tensor, num_beams, batch_axis=0): + """Unflattens the first, flat batch*beam dimension of a non-scalar array.""" + shape = shape_list(tensor) + return tf.reshape(tensor, shape[:batch_axis] + [-1, num_beams] + shape[batch_axis + 1 :]) + + # 1. 
init beam_search values + logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() + + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + num_return_sequences = ( + num_return_sequences if num_return_sequences is not None else self.generation_config.num_return_sequences + ) + + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + length_penalty = length_penalty if length_penalty is not None else self.generation_config.length_penalty + early_stopping = early_stopping if early_stopping is not None else self.generation_config.early_stopping + + use_cache = model_kwargs.pop("use_cache", self.generation_config.use_cache) + use_xla = not tf.executing_eagerly() + # TODO (Joao): fix cache format or find programatic way to detect cache index + # GPT2 and other models has a slightly different cache structure, with a different batch axis + model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) + cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 + # some models, like XLNet, need more than the last token in the presence of past_key_values + needs_full_input = "use_mems" in set(inspect.signature(self.prepare_inputs_for_generation).parameters.keys()) + + # 2. init `attentions`, `hidden_states`, and `scores` tuples + all_scores = [] if (return_dict_in_generate and output_scores) else None + decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None + cross_attentions = [] if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None + + # 3. init tensors to use for "xla-compileable" generate function + batch_size, num_beams, cur_len = shape_list(input_ids) + # store the prompt length of decoder + decoder_prompt_len = cur_len + + # per batch, beam-item holding current token in loop, pre-populated with `pad_token_id` + input_ids_padding = tf.ones((batch_size, num_beams, max_length - cur_len), dtype=tf.int32) * ( + pad_token_id or 0 + ) + running_sequences = tf.concat([input_ids, input_ids_padding], axis=-1) + sequences = tf.ones((batch_size, num_beams, max_length), dtype=tf.int32) * (pad_token_id or 0) + + # per batch,beam-item state bit indicating if sentence has finished. 
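+ # (here `is_sent_finished` refers to the finalized set: throughout the loop the `running_*` tensors track + # beams that are still being extended, while `sequences`, `scores` and `beam_indices` hold the best finished + # hypotheses found so far; candidates are promoted from the former to the latter once they emit an EOS token)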
+ is_sent_finished = tf.zeros((batch_size, num_beams), dtype=tf.bool) + + # per batch, beam-item score, logprobs + running_scores = tf.tile( + tf.expand_dims(tf.convert_to_tensor([0.0] + [-1.0e9] * (num_beams - 1)), axis=0), [batch_size, 1] + ) + scores = tf.ones((batch_size, num_beams)) * -1.0e9 + + # per batch beam indices + running_beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1 + beam_indices = tf.ones((batch_size, num_beams, max_length - decoder_prompt_len), dtype=tf.int32) * -1 + + # flatten beam dim + if "encoder_outputs" in model_kwargs: + model_kwargs["encoder_outputs"]["last_hidden_state"] = flatten_beam_dim( + model_kwargs["encoder_outputs"]["last_hidden_state"] + ) + if "attention_mask" in model_kwargs: + model_kwargs["attention_mask"] = flatten_beam_dim(model_kwargs["attention_mask"]) + + # 4. define "xla-compile-able" stop-condition and auto-regressive function + # define stop-condition and auto-regressive function + def beam_search_cond_fn( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ): + """ + Beam Search termination condition function -- halts the generation loop if any of these conditions becomes + False + """ + # 1. is less than max length? + not_max_length_yet = cur_len < max_length + + # 2. can the new beams still improve? + # early_stopping == False -> apply heuristic = always get the best score from `cur_len - decoder_prompt_len`. See the discussion + # below for more details. + # https://github.com/huggingface/transformers/pull/20901#issuecomment-1369845565 + # early_stopping == "never" -> compute the best score from max_length or cur_len, depending on the sign of + # length_penalty. Positive length_penalty favors longer sequences, thus we use max_length there. + if early_stopping == "never" and length_penalty > 0.0: + best_running_score = running_scores[:, :1] / ((max_length - decoder_prompt_len) ** length_penalty) + else: + best_running_score = running_scores[:, :1] / ( + tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty + ) + worst_finished_score = tf.where( + is_sent_finished, tf.math.reduce_min(scores, axis=1, keepdims=True), -1.0e9 + ) + improvement_still_possible = tf.math.reduce_any(best_running_score > worst_finished_score) + + # 3. is there still a beam that has not finished? + still_open_beam = ~(tf.math.reduce_all(is_sent_finished) & (early_stopping is True)) + + return not_max_length_yet & still_open_beam & improvement_still_possible + + def beam_search_body_fn( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ): + """ + Beam Search iterative update function -- each iteration adds a new token and updates the best sequences + seen so far + """ + # 1. Forward current tokens + if model_kwargs.get("past_key_values") is None or needs_full_input: + input_ids = running_sequences[:, :, :cur_len] + else: + input_ids = tf.expand_dims(running_sequences[:, :, cur_len - 1], -1) + model_inputs = self.prepare_inputs_for_generation( + flatten_beam_dim(input_ids), use_cache=use_cache, **model_kwargs + ) + model_outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + logits = unflatten_beam_dim(model_outputs.logits[:, -1], num_beams) + + # 2. 
Compute log probs + # get log probabilities from logits, process logits with processors (*e.g.* min_length, ...), and + # add new logprobs to existing running logprobs scores. + log_probs = tf.nn.log_softmax(logits) + log_probs = logits_processor(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) + log_probs = unflatten_beam_dim(log_probs, num_beams) + if do_sample: + log_probs = logits_warper(flatten_beam_dim(running_sequences), flatten_beam_dim(log_probs), cur_len) + log_probs = unflatten_beam_dim(log_probs, num_beams) + log_probs_processed = log_probs + log_probs = log_probs + tf.expand_dims(running_scores, axis=2) + vocab_size = log_probs.shape[2] + log_probs = tf.reshape(log_probs, (batch_size, num_beams * vocab_size)) + + # Store scores, attentions and hidden_states when required + if not use_xla and return_dict_in_generate: + if output_scores: + all_scores.append( + logits_warper( + flatten_beam_dim(running_sequences), + flatten_beam_dim(log_probs_processed), + cur_len, + ) + ) + if output_attentions and self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.decoder_attentions) + elif output_attentions and not self.config.is_encoder_decoder: + decoder_attentions.append(model_outputs.attentions) + if self.config.is_encoder_decoder: + cross_attentions.append(model_outputs.cross_attentions) + + if output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.decoder_hidden_states) + elif output_hidden_states and not self.config.is_encoder_decoder: + decoder_hidden_states.append(model_outputs.hidden_states) + + # 3. Retrieve top-K + # Each item in batch has num_beams * vocab_size candidate sequences. For each item, get the top 2*k + # candidates with the highest log-probabilities. We gather the top 2*K beams here so that even if the + # best K sequences reach EOS simultaneously, we have another K sequences remaining to continue the live + # beam search. + # Gather the top 2*K scores from _all_ beams. + # Gather 2*k top beams. + # Recover the beam index by floor division. + # Recover token id by modulo division and expand id array for broadcasting. + # Update sequences for the 2*K top-k new sequences. 
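+ # e.g. with `num_beams=3` and a vocabulary of 50257 tokens (illustrative numbers), `beams_to_keep = 6` + # candidates are kept per batch item, and a flat candidate index of 120000 decodes to beam + # `120000 // 50257 = 2` and token id `120000 % 50257 = 19486`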
+ beams_to_keep = 2 * num_beams + if do_sample: + topk_indices = sample_without_replacement(log_probs, beams_to_keep) + topk_log_probs = tf.gather(log_probs, topk_indices, axis=1, batch_dims=1) + else: + topk_log_probs, topk_indices = tf.math.top_k(log_probs, k=beams_to_keep) + topk_current_beam_indices = topk_indices // vocab_size + topk_running_beam_indices = self._gather_beams(running_beam_indices, topk_current_beam_indices) + topk_running_sequences = self._gather_beams(running_sequences, topk_current_beam_indices) + topk_ids = topk_indices % vocab_size + + # writes the new token + indices_batch = tf.repeat(tf.range(batch_size), [beams_to_keep]) + indices_beam = tf.tile(tf.range(beams_to_keep), [batch_size]) + update_indices = tf.stack( + [indices_batch, indices_beam, tf.broadcast_to(cur_len, [batch_size * beams_to_keep])], axis=-1 + ) + topk_sequences = tf.tensor_scatter_nd_update( + tensor=topk_running_sequences, + indices=update_indices, + updates=tf.reshape(topk_ids, [batch_size * beams_to_keep]), + ) + + # we want to store the beam indices with batch information -> real beam index = beam index % num beams + batch_modified_indices = topk_current_beam_indices + tf.broadcast_to( + tf.expand_dims(tf.range(batch_size) * num_beams, axis=1), topk_current_beam_indices.shape + ) + update_indices = tf.stack( + [ + indices_batch, + indices_beam, + tf.broadcast_to(cur_len - decoder_prompt_len, [batch_size * beams_to_keep]), + ], + axis=-1, + ) + topk_beam_indices = tf.tensor_scatter_nd_update( + tensor=topk_running_beam_indices, + indices=update_indices, + updates=tf.reshape(batch_modified_indices, [batch_size * beams_to_keep]), + ) + + # 4. Check which sequences have ended + # Update current sequences: Did the top `num_beams` sequences reach an end marker? + # To prevent these just finished sequences from being added to the current sequences + # set of active beam search sequences, set their log probs to a very large negative value. + if eos_token_id is None: + eos_in_next_token = tf.zeros(topk_sequences[:, :, cur_len].shape, dtype=tf.bool) + else: + eos_in_next_token = tf.math.reduce_any( + tf.equal( + tf.broadcast_to( + topk_sequences[:, :, cur_len], + [len(eos_token_id)] + topk_sequences[:, :, cur_len].shape, + ), + tf.expand_dims(tf.expand_dims(eos_token_id, -1), -1), + ), + axis=0, + ) + did_topk_just_finished = eos_in_next_token & tf.broadcast_to( + tf.concat((tf.ones((num_beams), dtype=tf.bool), tf.zeros((num_beams), dtype=tf.bool)), axis=0), + shape_list(eos_in_next_token), + ) + + # non-top `num_beams` eos tokens can't be used to finish a beam, but the others can't be used in the next + # running sentences either + running_topk_log_probs = topk_log_probs + tf.cast(eos_in_next_token, tf.float32) * -1.0e9 + + # 5. Get running sequences scores for next + # Determine the top k beam indices (from top 2*k beams) from log probs and gather top k beams + # (from top 2*k beams). + next_topk_indices = tf.math.top_k(running_topk_log_probs, k=num_beams)[1] + next_running_sequences, next_running_scores, next_running_beam_indices = self._gather_beams( + [topk_sequences, running_topk_log_probs, topk_beam_indices], next_topk_indices + ) + + # 6. 
Process topk logits + # Further process log probs: + # - add length penalty + # - make sure no scores can be added anymore if beam is full + # - make sure still running sequences cannot be chosen as finalized beam + topk_log_probs = topk_log_probs / ( + tf.cast(cur_len + 1 - decoder_prompt_len, dtype=tf.float32) ** length_penalty + ) + beams_in_batch_are_full = tf.broadcast_to( + tf.math.reduce_all(is_sent_finished, axis=-1, keepdims=True), shape_list(did_topk_just_finished) + ) & (early_stopping is True) + add_penalty = ~did_topk_just_finished | beams_in_batch_are_full + topk_log_probs += tf.cast(add_penalty, tf.float32) * -1.0e9 + + # 7. Get scores, sequences, is sentence finished for next. + # Combine sequences, scores, and flags along the beam dimension and compare new finished sequence scores + # to existing finished scores and select the best from the new set of beams + merged_sequences = tf.concat([sequences, topk_sequences], axis=1) + merged_scores = tf.concat([scores, topk_log_probs], axis=1) + merged_beams = tf.concat([beam_indices, topk_beam_indices], axis=1) + merged_is_sent_finished = tf.concat([is_sent_finished, did_topk_just_finished], axis=1) + topk_merged_indices = tf.math.top_k(merged_scores, k=num_beams)[1] + next_sequences, next_scores, next_beam_indices, next_is_sent_finished = self._gather_beams( + [merged_sequences, merged_scores, merged_beams, merged_is_sent_finished], topk_merged_indices + ) + + # 8. Prepare data for the next iteration + # Determine the top k beam indices from the original set of all beams. With these, gather the top k + # beam-associated caches. + cur_len = cur_len + 1 + if "past_key_values" in model_outputs: + cache = tf.nest.map_structure( + lambda tensor: unflatten_beam_dim(tensor, num_beams, batch_axis=cache_batch_axis), + model_outputs.past_key_values, + ) + next_running_indices = self._gather_beams(topk_current_beam_indices, next_topk_indices) + next_cache = self._gather_beams(cache, next_running_indices, batch_axis=cache_batch_axis) + model_outputs["past_key_values"] = tf.nest.map_structure( + lambda tensor: flatten_beam_dim(tensor, batch_axis=cache_batch_axis), next_cache + ) + + if use_xla: + next_model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=model_outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=(batch_size * num_beams), + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + next_model_kwargs = self._update_model_kwargs_for_generation( + model_outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # if we don't cache past_key_values key values we need the whole input + if model_kwargs.get("past_key_values", None) is None: + # let's throw out `past_key_values` since we don't want `None` tensors + model_kwargs.pop("past_key_values", None) + + return ( + cur_len, + next_running_sequences, + next_running_scores, + next_running_beam_indices, + next_sequences, + next_scores, + next_beam_indices, + next_is_sent_finished, + decoder_prompt_len, + next_model_kwargs, + ) + + # 5. 
run generation + # 1st generation step has to be run before to initialize `past_key_values` (if active) + ( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ) = beam_search_body_fn( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ) + + # 2-to-n generation steps can then be run in autoregressive fashion (only in case 1st generation step does + # NOT yield EOS token though) + maximum_iterations = max_length - cur_len + ( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + _, + ) = tf.while_loop( + beam_search_cond_fn, + beam_search_body_fn, + ( + cur_len, + running_sequences, + running_scores, + running_beam_indices, + sequences, + scores, + beam_indices, + is_sent_finished, + decoder_prompt_len, + model_kwargs, + ), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + # Account for the edge-case where there are no finished sequences for a particular batch item. If so, return + # running sequences for that batch item. + none_finished = tf.math.reduce_any(is_sent_finished, axis=1) + sequences = tf.where(none_finished[:, None, None], sequences, running_sequences) + beam_indices = tf.where(none_finished[:, None, None], beam_indices, running_beam_indices) + + # Apply the length penalty so that running scores match the finalized scores if they are used + running_scores = running_scores / (tf.cast(cur_len - decoder_prompt_len, dtype=tf.float32) ** length_penalty) + scores = tf.where(none_finished[:, None], scores, running_scores) + + # Take best beams for each batch (the score is sorted in descending order) + sequences = flatten_beam_dim(sequences[:, :num_return_sequences, :]) + scores = flatten_beam_dim(scores[:, :num_return_sequences]) + beam_indices = flatten_beam_dim(beam_indices[:, :num_return_sequences, :]) + + if not use_xla: + # Cut for backward compatibility + sequences = sequences[:, :cur_len] + beam_indices = beam_indices[:, : cur_len - decoder_prompt_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + output_cls = TFBeamSampleEncoderDecoderOutput if do_sample else TFBeamSearchEncoderDecoderOutput + return output_cls( + sequences=sequences, + sequences_scores=scores, + scores=all_scores, + beam_indices=beam_indices, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + output_cls = TFBeamSampleDecoderOnlyOutput if do_sample else TFBeamSearchDecoderOnlyOutput + return output_cls( + sequences=sequences, + sequences_scores=scores, + scores=all_scores, + beam_indices=beam_indices, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return sequences + + def contrastive_search( + self, + input_ids: tf.Tensor, + top_k: Optional[int] = 1, + penalty_alpha: Optional[float] = 0, + 
logits_processor: Optional[TFLogitsProcessorList] = None, + logits_warper: Optional[TFLogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + **model_kwargs, + ) -> Union[TFContrastiveSearchOutput, tf.Tensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **contrastive search** and can + be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + Parameters: + input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + top_k (`int`, *optional*, defaults to 1): + The size of the candidate set that is used to re-rank for contrastive search. + penalty_alpha (`float`, *optional*, defaults to 0): + The degeneration penalty for contrastive search; activate when it is larger than 0. + logits_processor (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`TFLogitsProcessorList`, *optional*): + An instance of [`TFLogitsProcessorList`]. List of instances of class derived from [`TFLogitsWarper`] + used to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `call` function of the model. If + model is an encoder-decoder model the kwargs should include `encoder_outputs`. + Return: + [`~generation.TFContrastiveSearchDecoderOnlyOutput`], + [`~generation.TFContrastiveSearchEncoderDecoderOutput`] or `tf.Tensor`: A `tf.Tensor` containing the + generated tokens (default behaviour) or a [`~generation.TFContrastiveSearchDecoderOnlyOutput`] if + `model.config.is_encoder_decoder=False` and `return_dict_in_generate=True` or a + [`~generation.TFContrastiveSearchEncoderDecoderOutput`] if `model.config.is_encoder_decoder=True`. 
+ Examples: + ```python + >>> from transformers import AutoTokenizer, TFAutoModelForCausalLM + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") + >>> model = TFAutoModelForCausalLM.from_pretrained("facebook/opt-125m") + >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token + >>> model.config.pad_token_id = model.config.eos_token_id + >>> input_prompt = "DeepMind Company is" + >>> input_ids = tokenizer(input_prompt, return_tensors="tf") + >>> outputs = model.contrastive_search(**input_ids, penalty_alpha=0.6, top_k=4, max_length=64) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] + ```""" + + def gather_best_candidate(nested, selected_idx_stacked, batch_axis=0): + """Gathers the slices indexed by selected_idx_stacked from a potentially nested structure of tensors.""" + + def gather_fn(tensor): + gathered_tensor = tf.gather(params=tensor, indices=selected_idx_stacked, axis=batch_axis) + return gathered_tensor + + return tf.nest.map_structure(gather_fn, nested) + + # 1. init greedy_search values + logits_processor = logits_processor if logits_processor is not None else TFLogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else TFLogitsProcessorList() + max_length = max_length if max_length is not None else self.generation_config.max_length + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + use_cache = True # In contrastive search, we always use cache + model_kwargs.pop("use_cache", None) + + use_xla = not tf.executing_eagerly() + # TODO (Joao): fix cache format or find programatic way to detect cache index + # GPT2 and other models has a slightly different cache structure, with a different batch axis + model_name = str(self.decoder) if "EncoderDecoder" in str(self) else str(self) + cache_batch_axis = 1 if any(model_prefix in model_name for model_prefix in ("TFGPT2", "TFCTRL")) else 0 + + # 2. init `attentions`, `hidden_states`, and `scores` tuples + scores = [] if (return_dict_in_generate and output_scores) else None + decoder_attentions = [] if (return_dict_in_generate and output_attentions) else None + cross_attentions = [] if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = [] if (return_dict_in_generate and output_hidden_states) else None + + # 3. 
init tensors to use for "xla-compileable" generate function + batch_size, cur_len = shape_list(input_ids) + + # initialize `generated` (`input_ids` padded with `pad_token_id`), `finished_sequences` + input_ids_padding = tf.ones((batch_size, max_length - cur_len), dtype=tf.int32) * (pad_token_id or 0) + generated = tf.concat([input_ids, input_ids_padding], axis=-1) + finished_sequences = tf.zeros((batch_size,), dtype=tf.bool) + + # 4. define "xla-compile-able" stop-condition and auto-regressive function + # define condition fn + def contrastive_search_cond_fn( + generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables + ): + """state termination condition fn.""" + return ~tf.reduce_all(finished_sequences) + + # define condition fn + def contrastive_search_body_fn( + generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables + ): + """state update fn.""" + + # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; + # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step + if model_kwargs.get("past_key_values") is None: + # prepare inputs + model_inputs = self.prepare_inputs_for_generation( + generated[:, :cur_len], use_cache=use_cache, **model_kwargs + ) + + # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save + # the `encoder_outputs` + outputs = self( + **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions + ) + + # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with + # previous tokens) + if self.config.is_encoder_decoder: + last_hidden_states = outputs.decoder_hidden_states[-1] + else: + last_hidden_states = outputs.hidden_states[-1] + + # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across + # iterations (with fixed shapes) + if use_xla: + last_hidden_states = tf.pad(last_hidden_states, [[0, 0], [0, max_length - cur_len], [0, 0]]) + + # next logit for contrastive search to select top-k candidate tokens + logit_for_next_step = outputs.logits[:, -1, :] + + if use_xla: + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=outputs, + model_kwargs=model_kwargs, + cur_len=cur_len, + max_length=max_length, + batch_size=batch_size, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + # Expands model inputs top_k times, for batched forward passes (akin to beam search). + _, model_kwargs = self._expand_inputs_for_generation( + expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs + ) + + past_key_values = model_kwargs.get("past_key_values") + if past_key_values is None: + raise ValueError( + f"{self.__class__.__name__} does not support caching and therefore **can't** be used " + "for contrastive search." + ) + elif ( + not isinstance(past_key_values[0], (tuple, tf.Tensor)) + or past_key_values[0][0].shape[0] != batch_size + ): + raise ValueError( + f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " + "used for contrastive search without further modifications." 
+ ) + else: + logit_for_next_step = next_step_cached_variables["logit_for_next_step"] + last_hidden_states = next_step_cached_variables["last_hidden_states"] + outputs = next_step_cached_variables["outputs"] + + # contrastive_search main logic start: + # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by + # degeneration penalty + + logit_for_next_step = logits_processor(generated, logit_for_next_step, cur_len) + logit_for_next_step = logits_warper(generated, logit_for_next_step, cur_len) + next_probs = stable_softmax(logit_for_next_step, axis=-1) + top_k_probs, top_k_ids = tf.math.top_k(next_probs, k=top_k) + + # Store scores, attentions and hidden_states when required + if not use_xla and return_dict_in_generate: + if output_scores: + scores.append(logit_for_next_step) + if output_attentions and self.config.is_encoder_decoder: + decoder_attentions.append(outputs.decoder_attentions) + elif output_attentions and not self.config.is_encoder_decoder: + decoder_attentions.append(outputs.attentions) + if self.config.is_encoder_decoder: + cross_attentions.append(outputs.cross_attentions) + + if output_hidden_states and self.config.is_encoder_decoder: + decoder_hidden_states.append(outputs.decoder_hidden_states) + elif output_hidden_states and not self.config.is_encoder_decoder: + decoder_hidden_states.append(outputs.hidden_states) + + # Replicates the new past_key_values to match the `top_k` candidates + model_kwargs["past_key_values"] = tf.nest.map_structure( + lambda tensor: tf.repeat(tensor, top_k, axis=cache_batch_axis), model_kwargs["past_key_values"] + ) + + # compute the candidate tokens with the language model and collect their hidden_states + next_model_inputs = self.prepare_inputs_for_generation( + tf.reshape(top_k_ids, [-1, 1]), use_cache=use_cache, **model_kwargs + ) + outputs = self( + **next_model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions + ) + next_past_key_values = self._extract_past_from_model_output(outputs) + + logits = outputs.logits[:, -1, :] + # name is different for encoder-decoder and decoder-only models + if self.config.is_encoder_decoder: + next_hidden = outputs.decoder_hidden_states[-1] + full_hidden_states = outputs.decoder_hidden_states + else: + next_hidden = outputs.hidden_states[-1] + full_hidden_states = outputs.hidden_states + context_hidden = tf.repeat(last_hidden_states[:, :cur_len, :], top_k, axis=0) + + # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the + # model confidence + selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) + + # converts indices over the top_k dimension to indices over the stacked top_k * batch_size dimension, for indexing + # without a need to reshape on tensors that have these two dimensions stacked + selected_idx_stacked = selected_idx + tf.range(selected_idx.shape[0], dtype=tf.int64) * top_k + + # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing + # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores + # (model confidence minus degeneration penalty); (6) decoder hidden_states + next_tokens = tf.gather(top_k_ids, selected_idx, axis=1, batch_dims=1) + next_hidden = gather_best_candidate(next_hidden, selected_idx_stacked) + + # XLA: last_hidden_states normally grows at each step, but in XLA it is padded so as to be used across + # iterations (with fixed shapes)
+ if use_xla: + last_hidden_states = dynamic_update_slice(last_hidden_states, next_hidden, [0, cur_len, 0]) + else: + last_hidden_states = tf.concat([last_hidden_states, next_hidden], axis=1) + + next_decoder_hidden_states = gather_best_candidate(full_hidden_states, selected_idx_stacked) + next_past_key_values = gather_best_candidate( + next_past_key_values, selected_idx_stacked, batch_axis=cache_batch_axis + ) + logit_for_next_step = gather_best_candidate(logits, selected_idx_stacked) + + # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration + if self.config.is_encoder_decoder: + next_step_cross_attentions = () + next_step_decoder_attentions = () + if output_attentions: + next_step_cross_attentions = gather_best_candidate(outputs.cross_attentions, selected_idx_stacked) + next_step_decoder_attentions = gather_best_candidate( + outputs.decoder_attentions, selected_idx_stacked + ) + outputs = TFSeq2SeqLMOutput( + past_key_values=next_past_key_values, + decoder_hidden_states=next_decoder_hidden_states, + decoder_attentions=next_step_decoder_attentions or None, + cross_attentions=next_step_cross_attentions or None, + ) + else: + next_step_attentions = () + if output_attentions: + next_step_attentions = gather_best_candidate(outputs.attentions, selected_idx_stacked) + outputs = TFCausalLMOutputWithPast( + past_key_values=next_past_key_values, + hidden_states=next_decoder_hidden_states, + attentions=next_step_attentions or None, + ) + # contrastive_search main logic end + + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + unfinished_seq = 1 - tf.cast(finished_sequences, tf.int32) + next_tokens = next_tokens * unfinished_seq + pad_token_id * (1 - unfinished_seq) + next_token_is_eos = tf.math.reduce_any( + tf.equal( + tf.broadcast_to(next_tokens, (len(eos_token_id), batch_size)), tf.expand_dims(eos_token_id, -1) + ), + axis=0, + ) + finished_sequences = finished_sequences | next_token_is_eos + + # update `generated` and `cur_len` + update_indices = tf.stack([tf.range(batch_size), tf.broadcast_to(cur_len, [batch_size])], axis=-1) + generated = tf.tensor_scatter_nd_update(tensor=generated, indices=update_indices, updates=next_tokens) + cur_len += 1 + + if use_xla: + # NOTE: 1) relative to other generation strategies, contrastive search is always running forward + # passes one step ahead -- hence the `cur_len=cur_len + 1`; 2) the attention mask here is expanded from + # [batch_size, ...] to [batch_size*top_k, ...] -- hence the `batch_size=batch_size * top_k` + model_kwargs = self._update_model_kwargs_for_xla_generation( + model_outputs=outputs, + model_kwargs=model_kwargs, + cur_len=cur_len + 1, + max_length=max_length, + batch_size=batch_size * top_k, + is_encoder_decoder=self.config.is_encoder_decoder, + batch_axis=cache_batch_axis, + ) + else: + model_kwargs = self._update_model_kwargs_for_generation( + outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder + ) + + next_step_cached_variables = { + "logit_for_next_step": logit_for_next_step, + "last_hidden_states": last_hidden_states, + "outputs": outputs, + } + return generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables + + # 5. 
run generation + # 1st generation step has to be run before to initialize `past_key_values` + generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables = contrastive_search_body_fn( + generated, finished_sequences, cur_len, model_kwargs, None + ) + + # 2-to-n generation steps can then be run in autoregressive fashion + # only in case 1st generation step does NOT yield EOS token though + maximum_iterations = max_length - cur_len + generated, _, cur_len, _, _ = tf.while_loop( + contrastive_search_cond_fn, + contrastive_search_body_fn, + (generated, finished_sequences, cur_len, model_kwargs, next_step_cached_variables), + maximum_iterations=maximum_iterations, + ) + + # 6. prepare outputs + if not use_xla: + # cut for backward compatibility + generated = generated[:, :cur_len] + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + # if model is an encoder-decoder, retrieve encoder attention weights + # and hidden states + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + scores = tuple(scores) if scores is not None else None + decoder_attentions = tuple(decoder_attentions) if decoder_attentions is not None else None + cross_attentions = tuple(cross_attentions) if cross_attentions is not None else None + decoder_hidden_states = tuple(decoder_hidden_states) if decoder_hidden_states is not None else None + + return TFContrastiveSearchEncoderDecoderOutput( + sequences=generated, + scores=scores, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + ) + else: + return TFContrastiveSearchDecoderOnlyOutput( + sequences=generated, + scores=scores, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + ) + else: + return generated + + +def scatter_values_on_batch_indices(values, batch_indices): + shape = shape_list(batch_indices) + # broadcast batch dim to shape + broad_casted_batch_dims = tf.reshape(tf.broadcast_to(tf.expand_dims(tf.range(shape[0]), axis=-1), shape), [1, -1]) + # transform batch_indices to pair_indices + pair_indices = tf.transpose(tf.concat([broad_casted_batch_dims, tf.reshape(batch_indices, [1, -1])], 0)) + # scatter values to pair indices + return tf.scatter_nd(pair_indices, tf.reshape(values, [-1]), shape) + + +def sample_without_replacement(logits, num_samples): + """ + categorical sampling without replacement is currently not implemented the gumbel-max trick will do for now see + https://github.com/tensorflow/tensorflow/issues/9260 for more info + """ + z = -tf.math.log(-tf.math.log(tf.random.uniform(shape_list(logits), 0, 1))) + _, indices = tf.nn.top_k(logits + z, num_samples) + return indices + + +def _ranking_fast( + context_hidden: tf.Tensor, + next_hidden: tf.Tensor, + next_top_k_probs: tf.Tensor, + alpha: float, + beam_width: int, +) -> tf.Tensor: + """ + Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described + in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each + row in the batch. 
+ """ + norm_context_hidden = context_hidden / tf.norm(context_hidden, axis=2, keepdims=True) + norm_next_hidden = next_hidden / tf.norm(next_hidden, axis=2, keepdims=True) + cosine_matrix = tf.squeeze(tf.linalg.matmul(norm_context_hidden, norm_next_hidden, transpose_b=True), axis=-1) + degeneration_penalty = tf.reduce_max(cosine_matrix, axis=-1) + next_top_k_probs = tf.reshape(next_top_k_probs, shape=[-1]) + contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty + contrastive_score = tf.reshape(contrastive_score, shape=[-1, beam_width]) + selected_idx = tf.argmax(contrastive_score, axis=1) + return selected_idx diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/generation/utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/generation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..002cea9d73ca9d5c6af1eef2d454188595ef137b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/generation/utils.py @@ -0,0 +1,5083 @@ +# coding=utf-8 +# Copyright 2020 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. +# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy +import inspect +import warnings +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.distributed as dist +from torch import nn + +from ..cache_utils import Cache, DynamicCache, StaticCache +from ..integrations.deepspeed import is_deepspeed_zero3_enabled +from ..modeling_outputs import CausalLMOutputWithPast, Seq2SeqLMOutput +from ..models.auto import ( + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, +) +from ..utils import ModelOutput, is_accelerate_available, is_torchdynamo_compiling, logging +from .beam_constraints import DisjunctiveConstraint, PhrasalConstraint +from .beam_search import BeamScorer, BeamSearchScorer, ConstrainedBeamSearchScorer +from .candidate_generator import ( + AssistedCandidateGenerator, + CandidateGenerator, + PromptLookupCandidateGenerator, + _crop_past_key_values, + _prepare_attention_mask, + _prepare_token_type_ids, +) +from .configuration_utils import GenerationConfig, GenerationMode +from .logits_process import ( + EncoderNoRepeatNGramLogitsProcessor, + EncoderRepetitionPenaltyLogitsProcessor, + EpsilonLogitsWarper, + EtaLogitsWarper, + ExponentialDecayLengthPenalty, + ForcedBOSTokenLogitsProcessor, + ForcedEOSTokenLogitsProcessor, + ForceTokensLogitsProcessor, + HammingDiversityLogitsProcessor, + InfNanRemoveLogitsProcessor, + LogitNormalization, + LogitsProcessorList, + MinLengthLogitsProcessor, + MinNewTokensLengthLogitsProcessor, + NoBadWordsLogitsProcessor, + NoRepeatNGramLogitsProcessor, + PrefixConstrainedLogitsProcessor, + RepetitionPenaltyLogitsProcessor, + SequenceBiasLogitsProcessor, + SuppressTokensAtBeginLogitsProcessor, + SuppressTokensLogitsProcessor, + TemperatureLogitsWarper, + TopKLogitsWarper, + TopPLogitsWarper, + TypicalLogitsWarper, + UnbatchedClassifierFreeGuidanceLogitsProcessor, +) +from .stopping_criteria import ( + EosTokenCriteria, + MaxLengthCriteria, + MaxTimeCriteria, + StoppingCriteria, + StoppingCriteriaList, + validate_stopping_criteria, +) + + +if TYPE_CHECKING: + from ..modeling_utils import PreTrainedModel + from .streamers import BaseStreamer + +logger = logging.get_logger(__name__) + +if is_accelerate_available(): + from accelerate.hooks import AlignDevicesHook, add_hook_to_module + +NEED_SETUP_CACHE_CLASSES_MAPPING = { + "static": StaticCache, +} + + +@dataclass +class GenerateDecoderOnlyOutput(ModelOutput): + """ + Outputs of decoder-only generation models, when using non-beam methods. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. 
+ logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`): + Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + scores: Optional[Tuple[torch.FloatTensor]] = None + logits: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class GenerateEncoderDecoderOutput(ModelOutput): + """ + Outputs of encoder-decoder generation models, when using non-beam methods. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`): + Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. 
+ encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + scores: Optional[Tuple[torch.FloatTensor]] = None + logits: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class GenerateBeamDecoderOnlyOutput(ModelOutput): + """ + Outputs of decoder-only generation models, when using beam methods. + + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. 
+ sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. + logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`): + Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams, num_heads, generated_length, sequence_length)`. + hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. + past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + sequences_scores: Optional[torch.FloatTensor] = None + scores: Optional[Tuple[torch.FloatTensor]] = None + logits: Optional[Tuple[torch.FloatTensor]] = None + beam_indices: Optional[torch.LongTensor] = None + attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +@dataclass +class GenerateBeamEncoderDecoderOutput(ModelOutput): + """ + Outputs of encoder-decoder generation models, when using beam methods. 
+ + Args: + sequences (`torch.LongTensor` of shape `(batch_size*num_return_sequences, sequence_length)`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter + if all batches finished early due to the `eos_token_id`. + sequences_scores (`torch.FloatTensor` of shape `(batch_size*num_return_sequences)`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Final beam scores of the generated `sequences`. + scores (`tuple(torch.FloatTensor)` *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. + logits (`tuple(torch.FloatTensor)` *optional*, returned when `output_logits=True` is passed or when `config.output_logits=True`): + Unprocessed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) + at each generation step. Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for + each generated token), with each tensor of shape `(batch_size, config.vocab_size)`. + beam_indices (`torch.LongTensor`, *optional*, returned when `output_scores=True` is passed or when `config.output_scores=True`): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer of the decoder) of shape `(batch_size, num_heads, + sequence_length, sequence_length)`. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. + decoder_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, num_heads, generated_length, + sequence_length)`. + cross_attentions (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_attentions=True` is passed or `config.output_attentions=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size, num_heads, generated_length, sequence_length)`. + decoder_hidden_states (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of + `torch.FloatTensor` of shape `(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. 
+ past_key_values (`tuple(tuple(torch.FloatTensor)))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + NOTE: some models have a different `past_key_values` format, confirm with the model's documentation. + Usually a Tuple (one element for each layer of the decoder) of tuples (two elements, key tensor and value + tensor). The first Tuple is of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if + `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads, + encoder_sequence_length, embed_size_per_head)`. + """ + + sequences: torch.LongTensor = None + sequences_scores: Optional[torch.FloatTensor] = None + scores: Optional[Tuple[torch.FloatTensor]] = None + logits: Optional[Tuple[torch.FloatTensor]] = None + beam_indices: Optional[torch.LongTensor] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + cross_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None + past_key_values: Optional[Tuple[Tuple[Tuple[torch.FloatTensor]]]] = None + + +# Equivalent classes (kept for retrocompatibility purposes) +GreedySearchDecoderOnlyOutput = GenerateDecoderOnlyOutput +ContrastiveSearchDecoderOnlyOutput = GenerateDecoderOnlyOutput +SampleDecoderOnlyOutput = GenerateDecoderOnlyOutput + +ContrastiveSearchEncoderDecoderOutput = GenerateEncoderDecoderOutput +GreedySearchEncoderDecoderOutput = GenerateEncoderDecoderOutput +SampleEncoderDecoderOutput = GenerateEncoderDecoderOutput + +BeamSearchDecoderOnlyOutput = GenerateBeamDecoderOnlyOutput +BeamSampleDecoderOnlyOutput = GenerateBeamDecoderOnlyOutput + +BeamSearchEncoderDecoderOutput = GenerateBeamEncoderDecoderOutput +BeamSampleEncoderDecoderOutput = GenerateBeamEncoderDecoderOutput + +GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput] +SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput] +BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput] +BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput] +ContrastiveSearchOutput = Union[ContrastiveSearchEncoderDecoderOutput, ContrastiveSearchDecoderOnlyOutput] + +# Typing shortcuts +GenerateNonBeamOutput = Union[GenerateDecoderOnlyOutput, GenerateEncoderDecoderOutput] +GenerateBeamOutput = Union[GenerateBeamDecoderOnlyOutput, GenerateBeamEncoderDecoderOutput] +GenerateOutput = Union[GenerateNonBeamOutput, GenerateBeamOutput] + + +class GenerationMixin: + """ + A class containing all functions for auto-regressive text generation, to be used as a mixin in [`PreTrainedModel`]. 
+ + The class exposes [`~generation.GenerationMixin.generate`], which can be used for: + - *greedy decoding* by calling [`~generation.GenerationMixin._greedy_search`] if `num_beams=1` and + `do_sample=False` + - *contrastive search* by calling [`~generation.GenerationMixin._contrastive_search`] if `penalty_alpha>0` and + `top_k>1` + - *multinomial sampling* by calling [`~generation.GenerationMixin._sample`] if `num_beams=1` and + `do_sample=True` + - *beam-search decoding* by calling [`~generation.GenerationMixin._beam_search`] if `num_beams>1` and + `do_sample=False` + - *beam-search multinomial sampling* by calling [`~generation.GenerationMixin._beam_sample`] if `num_beams>1` + and `do_sample=True` + - *diverse beam-search decoding* by calling [`~generation.GenerationMixin._group_beam_search`], if `num_beams>1` + and `num_beam_groups>1` + - *constrained beam-search decoding* by calling [`~generation.GenerationMixin._constrained_beam_search`], if + `constraints!=None` or `force_words_ids!=None` + - *assisted decoding* by calling [`~generation.GenerationMixin._assisted_decoding`], if + `assistant_model` or `prompt_lookup_num_tokens` is passed to `.generate()` + + You do not need to call any of the above methods directly. Pass custom parameter values to 'generate' instead. To + learn more about decoding strategies refer to the [text generation strategies guide](../generation_strategies). + """ + + def prepare_inputs_for_generation(self, *args, **kwargs): + raise NotImplementedError( + "A model class needs to define a `prepare_inputs_for_generation` method in order to use `.generate()`." + ) + + def _prepare_model_inputs( + self, + inputs: Optional[torch.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]: + """ + This function extracts the model-specific `inputs` for generation. + """ + # 1. retrieve all kwargs that are non-None or non-model input related. + # some encoder-decoder models have different names for model and encoder + if ( + self.config.is_encoder_decoder + and hasattr(self, "encoder") + and self.encoder.main_input_name != self.main_input_name + ): + input_name = self.encoder.main_input_name + else: + input_name = self.main_input_name + + model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None or k != input_name} + + # 2. check whether model_input_name is passed as kwarg + # if yes and `inputs` is None use kwarg inputs + inputs_kwarg = model_kwargs.pop(input_name, None) + if inputs_kwarg is not None and inputs is not None: + raise ValueError( + f"`inputs`: {inputs}` were passed alongside {input_name} which is not allowed. " + f"Make sure to either pass {inputs} or {input_name}=..." + ) + elif inputs_kwarg is not None: + inputs = inputs_kwarg + + # 3. In the presence of `inputs_embeds` for text models: + # - decoder-only models should complain if the user attempts to pass `inputs_embeds`, but the model + # doesn't have its forwarding implemented. `inputs_embeds` is kept in `model_kwargs` and can coexist with + # input_ids (`inputs_embeds` will be used in the 1st generation step, as opposed to `input_ids`) + # - encoder-decoder models should complain if the user attempts to pass `inputs_embeds` and `input_ids`, and + # pull the former to inputs. It will be used in place of `input_ids` to get the encoder hidden states. 
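The decoding strategies listed in the `GenerationMixin` docstring above are not called directly; `generate()` picks one of them from the arguments it receives. A hedged usage sketch follows (the checkpoint name and generation lengths are only examples; the keyword arguments are the standard `generate()` parameters named in the docstring):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
inputs = tokenizer("The capital of France is", return_tensors="pt")

# greedy decoding: num_beams=1 and do_sample=False (the defaults)
greedy = model.generate(**inputs, max_new_tokens=10)

# multinomial sampling: num_beams=1 and do_sample=True
sampled = model.generate(**inputs, do_sample=True, top_k=50, max_new_tokens=10)

# beam-search decoding: num_beams>1 and do_sample=False
beam = model.generate(**inputs, num_beams=4, max_new_tokens=10)

# contrastive search: penalty_alpha>0 and top_k>1
contrastive = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=10)

print(tokenizer.batch_decode(greedy, skip_special_tokens=True))
```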
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs: + if not self.config.is_encoder_decoder: + has_inputs_embeds_forwarding = "inputs_embeds" in set( + inspect.signature(self.prepare_inputs_for_generation).parameters.keys() + ) + if not has_inputs_embeds_forwarding: + raise ValueError( + f"You passed `inputs_embeds` to `.generate()`, but the model class {self.__class__.__name__} " + "doesn't have its forwarding implemented. See the GPT2 implementation for an example " + "(https://github.com/huggingface/transformers/pull/21405), and feel free to open a PR with it!" + ) + # In this case, `input_ids` is moved to the `model_kwargs`, so a few automations (like the creation of + # the attention mask) can rely on the actual model input. + model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation( + inputs, bos_token_id, model_kwargs=model_kwargs + ) + else: + if inputs is not None: + raise ValueError("You passed `inputs_embeds` and `input_ids` to `.generate()`. Please pick one.") + inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds" + + # 4. if `inputs` is still None, try to create `input_ids` from BOS token + inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs) + return inputs, input_name, model_kwargs + + def _maybe_initialize_input_ids_for_generation( + self, + inputs: Optional[torch.Tensor] = None, + bos_token_id: Optional[int] = None, + model_kwargs: Optional[Dict[str, torch.Tensor]] = None, + ) -> torch.LongTensor: + """Initializes input ids for generation, if necessary.""" + if inputs is not None: + return inputs + + encoder_outputs = model_kwargs.get("encoder_outputs") + if self.config.is_encoder_decoder and encoder_outputs is not None: + # make dummy input_ids with value -100, as a sanity check ensuring that they won't be used for encoding + shape = encoder_outputs.last_hidden_state.size()[:-1] + return torch.ones(shape, dtype=torch.long, device=self.device) * -100 + + # If there is some tensor in `model_kwargs`, we can infer the batch size from it. This is helpful with + # soft-prompting or in multimodal implementations built on top of decoder-only language models. 
+ batch_size = 1 + for value in model_kwargs.values(): + if isinstance(value, torch.Tensor): + batch_size = value.shape[0] + break + + if "inputs_embeds" in model_kwargs: + return torch.ones((batch_size, 0), dtype=torch.long, device=self.device) + + if bos_token_id is None: + raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") + + return torch.ones((batch_size, 1), dtype=torch.long, device=self.device) * bos_token_id + + def _prepare_attention_mask_for_generation( + self, + inputs: torch.Tensor, + pad_token_id: Optional[int], + eos_token_id: Optional[Union[int, List[int]]], + ) -> torch.LongTensor: + is_input_ids = len(inputs.shape) == 2 and inputs.dtype in [torch.int, torch.long] + is_pad_token_in_inputs = (pad_token_id is not None) and (pad_token_id in inputs) + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or (pad_token_id not in eos_token_id) + + # Check if input is input_ids and padded -> only then is attention_mask defined + if is_input_ids and is_pad_token_in_inputs and is_pad_token_not_equal_to_eos_token_id: + return inputs.ne(pad_token_id).long() + else: + return torch.ones(inputs.shape[:2], dtype=torch.long, device=inputs.device) + + def _prepare_encoder_decoder_kwargs_for_generation( + self, inputs_tensor: torch.Tensor, model_kwargs, model_input_name: Optional[str] = None + ) -> Dict[str, Any]: + # 1. get encoder + encoder = self.get_encoder() + # Compatibility with Accelerate big model inference: we need the encoder to outputs stuff on the same device + # as the inputs. + if hasattr(self, "hf_device_map"): + if hasattr(encoder, "_hf_hook"): + encoder._hf_hook.io_same_device = True + else: + add_hook_to_module(encoder, AlignDevicesHook(io_same_device=True)) + + # 2. Prepare encoder args and encoder kwargs from model kwargs. + irrelevant_prefix = ["decoder_", "cross_attn", "use_cache"] + encoder_kwargs = { + argument: value + for argument, value in model_kwargs.items() + if not any(argument.startswith(p) for p in irrelevant_prefix) + } + encoder_signature = set(inspect.signature(encoder.forward).parameters) + encoder_accepts_wildcard = "kwargs" in encoder_signature or "model_kwargs" in encoder_signature + if not encoder_accepts_wildcard: + encoder_kwargs = { + argument: value for argument, value in encoder_kwargs.items() if argument in encoder_signature + } + + # 3. make sure that encoder returns `ModelOutput` + model_input_name = model_input_name if model_input_name is not None else self.main_input_name + encoder_kwargs["return_dict"] = True + encoder_kwargs[model_input_name] = inputs_tensor + model_kwargs["encoder_outputs"]: ModelOutput = encoder(**encoder_kwargs) + + return model_kwargs + + def _prepare_decoder_input_ids_for_generation( + self, + batch_size: int, + model_input_name: str, + model_kwargs: Dict[str, torch.Tensor], + decoder_start_token_id: Union[int, List[int]] = None, + bos_token_id: int = None, + device: torch.device = None, + ) -> Tuple[torch.LongTensor, Dict[str, torch.Tensor]]: + """Prepares `decoder_input_ids` for generation with encoder-decoder models""" + # 1. Check whether the user has defined `decoder_input_ids` manually. To facilitate in terms of input naming, + # we also allow the user to pass it under `input_ids`, if the encoder does not use it as the main input. 
+ if model_kwargs is not None and "decoder_input_ids" in model_kwargs: + decoder_input_ids = model_kwargs.pop("decoder_input_ids") + elif "input_ids" in model_kwargs and model_input_name != "input_ids": + decoder_input_ids = model_kwargs.pop("input_ids") + else: + decoder_input_ids = None + + # 2. Encoder-decoder models expect the `decoder_input_ids` to start with a special token. Let's ensure that. + decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) + if device is None: + device = self.device + if isinstance(decoder_start_token_id, list): + if len(decoder_start_token_id) != batch_size: + raise ValueError( + f"`decoder_start_token_id` expcted to have length {batch_size} but got {len(decoder_start_token_id)}" + ) + decoder_input_ids_start = torch.tensor(decoder_start_token_id, dtype=torch.long, device=device) + decoder_input_ids_start = decoder_input_ids_start.view(-1, 1) + else: + decoder_input_ids_start = ( + torch.ones((batch_size, 1), dtype=torch.long, device=device) * decoder_start_token_id + ) + + # no user input -> use decoder_start_token_id as decoder_input_ids + if decoder_input_ids is None: + decoder_input_ids = decoder_input_ids_start + # exception: Donut checkpoints have task-specific decoder starts and don't expect a BOS token + elif self.config.model_type == "vision-encoder-decoder" and "donut" in self.name_or_path.lower(): + pass + elif self.config.model_type in ["whisper"]: + pass + # user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust + # decoder_attention_mask if provided) + elif ( + isinstance(decoder_start_token_id, int) + and (decoder_input_ids[:, 0] != decoder_start_token_id).all().item() + ) or ( + isinstance(decoder_start_token_id, torch.Tensor) + and (decoder_input_ids[:, 0] != decoder_start_token_id[:, 0]).all().item() + ): + decoder_input_ids = torch.cat([decoder_input_ids_start, decoder_input_ids], dim=-1) + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + decoder_attention_mask = torch.cat( + (torch.ones_like(decoder_attention_mask)[:, :1], decoder_attention_mask), + dim=-1, + ) + model_kwargs["decoder_attention_mask"] = decoder_attention_mask + + return decoder_input_ids, model_kwargs + + def _get_decoder_start_token_id( + self, decoder_start_token_id: Union[int, List[int]] = None, bos_token_id: int = None + ) -> int: + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id is not None + else self.generation_config.decoder_start_token_id + ) + bos_token_id = bos_token_id if bos_token_id is not None else self.generation_config.bos_token_id + + if decoder_start_token_id is not None: + return decoder_start_token_id + elif bos_token_id is not None: + return bos_token_id + raise ValueError( + "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." + ) + + @staticmethod + def _expand_inputs_for_generation( + expand_size: int = 1, + is_encoder_decoder: bool = False, + input_ids: Optional[torch.LongTensor] = None, + **model_kwargs, + ) -> Tuple[torch.LongTensor, Dict[str, Any]]: + """Expands tensors from [batch_size, ...] 
to [batch_size * expand_size, ...]""" + + def _expand_dict_for_generation(dict_to_expand): + for key in dict_to_expand: + if ( + key != "cache_position" + and dict_to_expand[key] is not None + and isinstance(dict_to_expand[key], torch.Tensor) + ): + dict_to_expand[key] = dict_to_expand[key].repeat_interleave(expand_size, dim=0) + return dict_to_expand + + if input_ids is not None: + input_ids = input_ids.repeat_interleave(expand_size, dim=0) + + model_kwargs = _expand_dict_for_generation(model_kwargs) + + if is_encoder_decoder: + if model_kwargs.get("encoder_outputs") is None: + raise ValueError("If `is_encoder_decoder` is True, make sure that `encoder_outputs` is defined.") + model_kwargs["encoder_outputs"] = _expand_dict_for_generation(model_kwargs["encoder_outputs"]) + + return input_ids, model_kwargs + + def _extract_past_from_model_output(self, outputs: ModelOutput, standardize_cache_format: bool = False): + past_key_values = None + if "past_key_values" in outputs: + past_key_values = outputs.past_key_values + elif "mems" in outputs: + past_key_values = outputs.mems + elif "past_buckets_states" in outputs: + past_key_values = outputs.past_buckets_states + + # Bloom fix: standardizes the cache format when requested + if standardize_cache_format and hasattr(self, "_convert_to_standard_cache"): + batch_size = outputs.logits.shape[0] + past_key_values = self._convert_to_standard_cache(past_key_values, batch_size=batch_size) + return past_key_values + + def _update_model_kwargs_for_generation( + self, + outputs: ModelOutput, + model_kwargs: Dict[str, Any], + is_encoder_decoder: bool = False, + standardize_cache_format: bool = False, + ) -> Dict[str, Any]: + # update past_key_values + model_kwargs["past_key_values"] = self._extract_past_from_model_output( + outputs, standardize_cache_format=standardize_cache_format + ) + if getattr(outputs, "state", None) is not None: + model_kwargs["state"] = outputs.state + + # update token_type_ids with last value + if "token_type_ids" in model_kwargs: + token_type_ids = model_kwargs["token_type_ids"] + model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) + + if not is_encoder_decoder: + # update attention mask + if "attention_mask" in model_kwargs: + attention_mask = model_kwargs["attention_mask"] + model_kwargs["attention_mask"] = torch.cat( + [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 + ) + else: + # update decoder attention mask + if "decoder_attention_mask" in model_kwargs: + decoder_attention_mask = model_kwargs["decoder_attention_mask"] + model_kwargs["decoder_attention_mask"] = torch.cat( + [decoder_attention_mask, decoder_attention_mask.new_ones((decoder_attention_mask.shape[0], 1))], + dim=-1, + ) + + if "cache_position" in model_kwargs and model_kwargs["cache_position"] is not None: + model_kwargs["cache_position"] = model_kwargs["cache_position"][-1:] + 1 + + return model_kwargs + + def _reorder_cache(self, past_key_values, beam_idx): + raise NotImplementedError( + f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to" + f" enable beam search for {self.__class__}" + ) + + def _get_candidate_generator( + self, + generation_config: GenerationConfig, + input_ids: torch.LongTensor, + inputs_tensor: torch.Tensor, + assistant_model: "PreTrainedModel", + logits_processor: LogitsProcessorList, + model_kwargs: Dict, + ) -> CandidateGenerator: + """ + Returns the candidate generator to be used in 
`assisted_generation` + """ + if generation_config.prompt_lookup_num_tokens is not None: + candidate_generator = PromptLookupCandidateGenerator( + num_output_tokens=generation_config.prompt_lookup_num_tokens, + max_matching_ngram_size=generation_config.max_matching_ngram_size, + max_length=generation_config.max_length, + ) + else: + candidate_generator = AssistedCandidateGenerator( + input_ids=input_ids, + assistant_model=assistant_model, + generation_config=generation_config, + logits_processor=logits_processor, + model_kwargs=model_kwargs, + inputs_tensor=inputs_tensor, + ) + return candidate_generator + + def _get_logits_warper( + self, + generation_config: GenerationConfig, + ) -> LogitsProcessorList: + """ + This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsWarper`] instances + used for multinomial sampling. + """ + + # instantiate warpers list + warpers = LogitsProcessorList() + + # In beam methods, we need to keep at least one non-eos token to explore continuations that might have a + # better score (i.e. keep len(list(generation_config.eos_token_id)) + 1) + if generation_config.num_beams > 1: + if isinstance(generation_config.eos_token_id, list): + min_tokens_to_keep = len(generation_config.eos_token_id) + 1 + else: + min_tokens_to_keep = 2 + else: + min_tokens_to_keep = 1 + + # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files + # all samplers can be found in `generation_utils_samplers.py` + if generation_config.temperature is not None and generation_config.temperature != 1.0: + warpers.append(TemperatureLogitsWarper(generation_config.temperature)) + if generation_config.top_k is not None and generation_config.top_k != 0: + warpers.append(TopKLogitsWarper(top_k=generation_config.top_k, min_tokens_to_keep=min_tokens_to_keep)) + if generation_config.top_p is not None and generation_config.top_p < 1.0: + warpers.append(TopPLogitsWarper(top_p=generation_config.top_p, min_tokens_to_keep=min_tokens_to_keep)) + if generation_config.typical_p is not None and generation_config.typical_p < 1.0: + warpers.append( + TypicalLogitsWarper(mass=generation_config.typical_p, min_tokens_to_keep=min_tokens_to_keep) + ) + if generation_config.epsilon_cutoff is not None and 0.0 < generation_config.epsilon_cutoff < 1.0: + warpers.append( + EpsilonLogitsWarper(epsilon=generation_config.epsilon_cutoff, min_tokens_to_keep=min_tokens_to_keep) + ) + if generation_config.eta_cutoff is not None and 0.0 < generation_config.eta_cutoff < 1.0: + warpers.append( + EtaLogitsWarper(epsilon=generation_config.eta_cutoff, min_tokens_to_keep=min_tokens_to_keep) + ) + # `LogitNormalization` should always be the last logit processor, when present + if generation_config.renormalize_logits is True: + warpers.append(LogitNormalization()) + return warpers + + def _get_logits_processor( + self, + generation_config: GenerationConfig, + input_ids_seq_length: int, + encoder_input_ids: torch.LongTensor, + prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], + logits_processor: Optional[LogitsProcessorList], + model_kwargs: Optional[Dict[str, Any]] = None, + negative_prompt_ids: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + ) -> LogitsProcessorList: + """ + This class returns a [`LogitsProcessorList`] list object that contains all relevant [`LogitsProcessor`] + instances used to modify the scores of the language model head. 
+ """ + # instantiate processors list + processors = LogitsProcessorList() + + if generation_config.guidance_scale is not None and generation_config.guidance_scale != 1: + processors.append( + UnbatchedClassifierFreeGuidanceLogitsProcessor( + generation_config.guidance_scale, + self, + unconditional_ids=negative_prompt_ids, + unconditional_attention_mask=negative_prompt_attention_mask, + use_cache=model_kwargs["use_cache"], + ) + ) + if generation_config.sequence_bias is not None: + processors.append(SequenceBiasLogitsProcessor(sequence_bias=generation_config.sequence_bias)) + + if generation_config.diversity_penalty is not None and generation_config.diversity_penalty > 0.0: + processors.append( + HammingDiversityLogitsProcessor( + diversity_penalty=generation_config.diversity_penalty, + num_beams=generation_config.num_beams, + num_beam_groups=generation_config.num_beam_groups, + ) + ) + if ( + generation_config.encoder_repetition_penalty is not None + and generation_config.encoder_repetition_penalty != 1.0 + ): + processors.append( + EncoderRepetitionPenaltyLogitsProcessor( + penalty=generation_config.encoder_repetition_penalty, encoder_input_ids=encoder_input_ids + ) + ) + if generation_config.repetition_penalty is not None and generation_config.repetition_penalty != 1.0: + processors.append(RepetitionPenaltyLogitsProcessor(penalty=generation_config.repetition_penalty)) + if generation_config.no_repeat_ngram_size is not None and generation_config.no_repeat_ngram_size > 0: + processors.append(NoRepeatNGramLogitsProcessor(generation_config.no_repeat_ngram_size)) + if ( + generation_config.encoder_no_repeat_ngram_size is not None + and generation_config.encoder_no_repeat_ngram_size > 0 + ): + processors.append( + EncoderNoRepeatNGramLogitsProcessor(generation_config.encoder_no_repeat_ngram_size, encoder_input_ids) + ) + if generation_config.bad_words_ids is not None: + processors.append( + NoBadWordsLogitsProcessor(generation_config.bad_words_ids, generation_config.eos_token_id) + ) + if ( + generation_config.min_length is not None + and generation_config.eos_token_id is not None + and generation_config.min_length > 0 + ): + processors.append(MinLengthLogitsProcessor(generation_config.min_length, generation_config.eos_token_id)) + if ( + generation_config.min_new_tokens is not None + and generation_config.eos_token_id is not None + and generation_config.min_new_tokens > 0 + ): + processors.append( + MinNewTokensLengthLogitsProcessor( + input_ids_seq_length, generation_config.min_new_tokens, generation_config.eos_token_id + ) + ) + if prefix_allowed_tokens_fn is not None: + processors.append( + PrefixConstrainedLogitsProcessor( + prefix_allowed_tokens_fn, generation_config.num_beams // generation_config.num_beam_groups + ) + ) + if generation_config.forced_bos_token_id is not None: + processors.append(ForcedBOSTokenLogitsProcessor(generation_config.forced_bos_token_id)) + if generation_config.forced_eos_token_id is not None: + processors.append( + ForcedEOSTokenLogitsProcessor(generation_config.max_length, generation_config.forced_eos_token_id) + ) + if generation_config.remove_invalid_values is True: + processors.append(InfNanRemoveLogitsProcessor()) + if generation_config.exponential_decay_length_penalty is not None: + processors.append( + ExponentialDecayLengthPenalty( + generation_config.exponential_decay_length_penalty, + generation_config.eos_token_id, + input_ids_seq_length, + ) + ) + if generation_config.suppress_tokens is not None: + 
processors.append(SuppressTokensLogitsProcessor(generation_config.suppress_tokens)) + if generation_config.begin_suppress_tokens is not None: + begin_index = input_ids_seq_length + begin_index = ( + begin_index + if (input_ids_seq_length > 1 or generation_config.forced_bos_token_id is None) + else begin_index + 1 + ) + if generation_config.forced_decoder_ids is not None: + # generation starts after the last token that is forced + begin_index += generation_config.forced_decoder_ids[-1][0] + processors.append( + SuppressTokensAtBeginLogitsProcessor(generation_config.begin_suppress_tokens, begin_index) + ) + if generation_config.forced_decoder_ids is not None: + # TODO(Sanchit): deprecate in v4.40 by removing this logic + warnings.warn( + "You have explicitly specified `forced_decoder_ids`. This functionality has been deprecated and will throw an error in v4.40. Please remove the `forced_decoder_ids` argument in favour of `input_ids` or `decoder_input_ids` respectively.", + FutureWarning, + ) + processors.append(ForceTokensLogitsProcessor(generation_config.forced_decoder_ids, _has_warned=True)) + processors = self._merge_criteria_processor_list(processors, logits_processor) + # `LogitNormalization` should always be the last logit processor, when present + if generation_config.renormalize_logits is True: + processors.append(LogitNormalization()) + return processors + + def _get_stopping_criteria( + self, generation_config: GenerationConfig, stopping_criteria: Optional[StoppingCriteriaList] + ) -> StoppingCriteriaList: + criteria = StoppingCriteriaList() + if generation_config.max_length is not None: + max_position_embeddings = getattr(self.config, "max_position_embeddings", None) + criteria.append( + MaxLengthCriteria( + max_length=generation_config.max_length, + max_position_embeddings=max_position_embeddings, + ) + ) + if generation_config.max_time is not None: + criteria.append(MaxTimeCriteria(max_time=generation_config.max_time)) + if generation_config.eos_token_id is not None: + criteria.append(EosTokenCriteria(eos_token_id=generation_config.eos_token_id)) + criteria = self._merge_criteria_processor_list(criteria, stopping_criteria) + return criteria + + def _merge_criteria_processor_list( + self, + default_list: Union[LogitsProcessorList, StoppingCriteriaList], + custom_list: Union[LogitsProcessorList, StoppingCriteriaList], + ) -> Union[LogitsProcessorList, StoppingCriteriaList]: + if len(custom_list) == 0: + return default_list + for default in default_list: + for custom in custom_list: + if type(custom) is type(default): + object_type = "stopping criteria" if isinstance(custom, StoppingCriteria) else "logits processor" + raise ValueError( + f"A custom {object_type} of type {type(custom)} with values {custom} has been passed to" + f" `.generate()`, but it has already been created with the values {default}. {default} has been" + " created by passing the corresponding arguments to generate or by the model's config default" + f" values. If you just want to change the default values of {object_type} consider passing" + f" them as arguments to `.generate()` instead of using a custom {object_type}." + ) + default_list.extend(custom_list) + return default_list + + def compute_transition_scores( + self, + sequences: torch.Tensor, + scores: Tuple[torch.Tensor], + beam_indices: Optional[torch.Tensor] = None, + normalize_logits: bool = False, + ) -> torch.Tensor: + """ + Computes the transition scores of sequences given the generation scores (and beam indices, if beam search was + used). 
This is a convenient method to quickly obtain the scores of the selected tokens at generation time. + + Parameters: + sequences (`torch.LongTensor`): + The generated sequences. The second dimension (sequence_length) is either equal to `max_length` or + shorter if all batches finished early due to the `eos_token_id`. + scores (`tuple(torch.FloatTensor)`): + Transition scores for each vocabulary token at each generation step. Beam transition scores consisting + of log probabilities of tokens conditioned on log softmax of previously generated tokens in this beam. + Tuple of `torch.FloatTensor` with up to `max_new_tokens` elements (one element for each generated token), + with each tensor of shape `(batch_size*num_beams, config.vocab_size)`. + beam_indices (`torch.LongTensor`, *optional*): + Beam indices of generated token id at each generation step. `torch.LongTensor` of shape + `(batch_size*num_return_sequences, sequence_length)`. Only required if `num_beams>1` was used at + generate-time. + normalize_logits (`bool`, *optional*, defaults to `False`): + Whether to normalize the logits (which, for legacy reasons, may be unnormalized). + + Return: + `torch.Tensor`: A `torch.Tensor` of shape `(batch_size*num_return_sequences, sequence_length)` containing + the transition scores (logits) + + Examples: + + ```python + >>> from transformers import GPT2Tokenizer, AutoModelForCausalLM + >>> import numpy as np + + >>> tokenizer = GPT2Tokenizer.from_pretrained("gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> tokenizer.pad_token_id = tokenizer.eos_token_id + >>> inputs = tokenizer(["Today is"], return_tensors="pt") + + >>> # Example 1: Print the scores for each token generated with Greedy Search + >>> outputs = model.generate(**inputs, max_new_tokens=5, return_dict_in_generate=True, output_scores=True) + >>> transition_scores = model.compute_transition_scores( + ... outputs.sequences, outputs.scores, normalize_logits=True + ... ) + >>> # input_length is the length of the input prompt for decoder-only models, like the GPT family, and 1 for + >>> # encoder-decoder models, like BART or T5. + >>> input_length = 1 if model.config.is_encoder_decoder else inputs.input_ids.shape[1] + >>> generated_tokens = outputs.sequences[:, input_length:] + >>> for tok, score in zip(generated_tokens[0], transition_scores[0]): + ... # | token | token string | log probability | probability + ... print(f"| {tok:5d} | {tokenizer.decode(tok):8s} | {score.numpy():.3f} | {np.exp(score.numpy()):.2%}") + | 262 | the | -1.414 | 24.33% + | 1110 | day | -2.609 | 7.36% + | 618 | when | -2.010 | 13.40% + | 356 | we | -1.859 | 15.58% + | 460 | can | -2.508 | 8.14% + + >>> # Example 2: Reconstruct the sequence scores from Beam Search + >>> outputs = model.generate( + ... **inputs, + ... max_new_tokens=5, + ... num_beams=4, + ... num_return_sequences=4, + ... return_dict_in_generate=True, + ... output_scores=True, + ... ) + >>> transition_scores = model.compute_transition_scores( + ... outputs.sequences, outputs.scores, outputs.beam_indices, normalize_logits=False + ... ) + >>> # If you sum the generated tokens' scores and apply the length penalty, you'll get the sequence scores. + >>> # Tip 1: recomputing the scores is only guaranteed to match with `normalize_logits=False`. Depending on the + >>> # use case, you might want to recompute it with `normalize_logits=True`. 
+ >>> # Tip 2: the output length does NOT include the input length + >>> output_length = np.sum(transition_scores.numpy() < 0, axis=1) + >>> length_penalty = model.generation_config.length_penalty + >>> reconstructed_scores = transition_scores.sum(axis=1) / (output_length**length_penalty) + >>> print(np.allclose(outputs.sequences_scores, reconstructed_scores)) + True + ```""" + # 1. In absence of `beam_indices`, we can assume that we come from e.g. greedy search, which is equivalent + # to a beam search approach where the first (and only) beam is always selected + if beam_indices is None: + beam_indices = torch.arange(scores[0].shape[0]).view(-1, 1).to(sequences.device) + beam_indices = beam_indices.expand(-1, len(scores)) + + # 2. reshape scores as [batch_size*vocab_size, # generation steps] with # generation steps being + # seq_len - input_length + scores = torch.stack(scores).reshape(len(scores), -1).transpose(0, 1) + + # 3. Optionally normalize the logits (across the vocab dimension) + if normalize_logits: + scores = scores.reshape(-1, self.config.vocab_size, scores.shape[-1]) + scores = torch.nn.functional.log_softmax(scores, dim=1) + scores = scores.reshape(-1, scores.shape[-1]) + + # 4. cut beam_indices to longest beam length + beam_indices_mask = beam_indices < 0 + max_beam_length = (1 - beam_indices_mask.long()).sum(-1).max() + beam_indices = beam_indices.clone()[:, :max_beam_length] + beam_indices_mask = beam_indices_mask[:, :max_beam_length] + + # 5. Set indices of beams that finished early to 0; such indices will be masked correctly afterwards + beam_indices[beam_indices_mask] = 0 + + # 6. multiply beam_indices with vocab size to gather correctly from scores + beam_sequence_indices = beam_indices * self.config.vocab_size + + # 7. Define which indices contributed to scores + cut_idx = sequences.shape[-1] - max_beam_length + indices = sequences[:, cut_idx:] + beam_sequence_indices + + # 8. Compute scores + transition_scores = scores.gather(0, indices) + + # 9. Mask out transition_scores of beams that stopped early + transition_scores[beam_indices_mask] = 0 + + return transition_scores + + def _validate_model_class(self): + """ + Confirms that the model class is compatible with generation. If not, raises an exception that points to the + right class to use. + """ + if not self.can_generate(): + generate_compatible_mappings = [ + MODEL_FOR_CAUSAL_LM_MAPPING, + MODEL_FOR_CAUSAL_IMAGE_MODELING_MAPPING, + MODEL_FOR_VISION_2_SEQ_MAPPING, + MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, + MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, + ] + generate_compatible_classes = set() + for model_mapping in generate_compatible_mappings: + supported_models = model_mapping.get(type(self.config), default=None) + if supported_models is not None: + generate_compatible_classes.add(supported_models.__name__) + exception_message = ( + f"The current model class ({self.__class__.__name__}) is not compatible with `.generate()`, as " + "it doesn't have a language model head." + ) + if generate_compatible_classes: + exception_message += f" Please use one of the following classes instead: {generate_compatible_classes}" + raise TypeError(exception_message) + + def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]): + """Validates model kwargs for generation. 
Generate argument typos will also be caught here.""" + # If a `Cache` instance is passed, checks whether the model is compatible with it + if isinstance(model_kwargs.get("past_key_values", None), Cache) and not self._supports_cache_class: + raise ValueError( + f"{self.__class__.__name__} does not support an instance of `Cache` as `past_key_values`. Please " + "check the model documentation for supported cache formats." + ) + + # Excludes arguments that are handled before calling any model function + if self.config.is_encoder_decoder: + for key in ["decoder_input_ids"]: + model_kwargs.pop(key, None) + + unused_model_args = [] + model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters) + # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If + # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;) + if "kwargs" in model_args or "model_kwargs" in model_args: + model_args |= set(inspect.signature(self.forward).parameters) + + # Encoder-Decoder models may also need Encoder arguments from `model_kwargs` + if self.config.is_encoder_decoder: + base_model = getattr(self, self.base_model_prefix, None) + + # allow encoder kwargs + encoder = getattr(self, "encoder", None) + # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`. + # Also, it has `base_model_prefix = "encoder_decoder"` but there is no `self.encoder_decoder` + # TODO: A better way to handle this. + if encoder is None and base_model is not None: + encoder = getattr(base_model, "encoder", None) + + if encoder is not None: + encoder_model_args = set(inspect.signature(encoder.forward).parameters) + model_args |= encoder_model_args + + # allow decoder kwargs + decoder = getattr(self, "decoder", None) + if decoder is None and base_model is not None: + decoder = getattr(base_model, "decoder", None) + + if decoder is not None: + decoder_model_args = set(inspect.signature(decoder.forward).parameters) + model_args |= {f"decoder_{x}" for x in decoder_model_args} + + # allow assistant_encoder_outputs to be passed if we're doing assisted generating + if "assistant_encoder_outputs" in model_kwargs: + model_args |= {"assistant_encoder_outputs"} + + for key, value in model_kwargs.items(): + if value is not None and key not in model_args: + unused_model_args.append(key) + + if unused_model_args: + raise ValueError( + f"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the" + " generate arguments will also show up in this list)" + ) + + def _validate_generated_length(self, generation_config, input_ids_length, has_default_max_length): + """Performs validation related to the resulting generated length""" + + # 1. Max length warnings related to poor parameterization + if has_default_max_length and generation_config.max_new_tokens is None and generation_config.max_length == 20: + # 20 is the default max_length of the generation config + warnings.warn( + f"Using the model-agnostic default `max_length` (={generation_config.max_length}) to control the " + "generation length. We recommend setting `max_new_tokens` to control the maximum length of the " + "generation.", + UserWarning, + ) + if input_ids_length >= generation_config.max_length: + input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" + raise ValueError( + f"Input length of {input_ids_string} is {input_ids_length}, but `max_length` is set to" + f" {generation_config.max_length}. 
This can lead to unexpected behavior. You should consider" + " increasing `max_length` or, better yet, setting `max_new_tokens`." + ) + + # 2. Min length warnings due to unfeasible parameter combinations + min_length_error_suffix = ( + " Generation will stop at the defined maximum length. You should decrease the minimum length and/or " + "increase the maximum length." + ) + if has_default_max_length: + min_length_error_suffix += ( + f" Note that `max_length` is set to {generation_config.max_length}, its default value." + ) + if generation_config.min_length is not None and generation_config.min_length > generation_config.max_length: + warnings.warn( + f"Unfeasible length constraints: `min_length` ({generation_config.min_length}) is larger than" + f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix, + UserWarning, + ) + if generation_config.min_new_tokens is not None: + min_length = generation_config.min_new_tokens + input_ids_length + if min_length > generation_config.max_length: + warnings.warn( + f"Unfeasible length constraints: `min_new_tokens` ({generation_config.min_new_tokens}), when " + f"added to the prompt length ({input_ids_length}), is larger than" + f" the maximum possible length ({generation_config.max_length})." + min_length_error_suffix, + UserWarning, + ) + + def _prepare_generated_length( + self, + generation_config, + has_default_max_length, + has_default_min_length, + model_input_name, + input_ids_length, + inputs_tensor, + ): + """Prepares max and min length in generation configs to avoid clashes between similar attributes""" + + if generation_config.max_new_tokens is not None: + if not has_default_max_length and generation_config.max_length is not None: + logger.warning( + f"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=" + f"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. " + "Please refer to the documentation for more information. " + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.max_length = generation_config.max_new_tokens + input_ids_length + + # if both `inputs_embeds` and `input_ids` are passed, we do not correct the length + # otherwise we need total length [inputs-embeds-len + new-tokens-len] to not go beyond indicated `max_length` + elif ( + model_input_name == "inputs_embeds" + and input_ids_length != inputs_tensor.shape[1] + and not self.config.is_encoder_decoder + ): + generation_config.max_length -= inputs_tensor.shape[1] + + # same for min length + if generation_config.min_new_tokens is not None: + if not has_default_min_length: + logger.warning( + f"Both `min_new_tokens` (={generation_config.min_new_tokens}) and `min_length`(=" + f"{generation_config.min_length}) seem to have been set. `min_new_tokens` will take precedence. " + "Please refer to the documentation for more information. 
" + "(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)" + ) + generation_config.min_length = generation_config.min_new_tokens + input_ids_length + + elif ( + model_input_name == "inputs_embeds" + and input_ids_length != inputs_tensor.shape[1] + and not self.config.is_encoder_decoder + ): + generation_config.min_length = max(generation_config.min_length - inputs_tensor.shape[1], 0) + + return generation_config + + def _prepare_generation_config( + self, generation_config: GenerationConfig, **kwargs: Dict + ) -> Tuple[GenerationConfig, Dict]: + """ + Prepares the base generation config, then applies any generation configuration options from kwargs. + """ + # TODO joao: when we can detect `fullgraph=True` in `torch.compile` (https://github.com/pytorch/pytorch/pull/120400) + # replace `is_torchdynamo_compiling` by the corresponding check. As it is, we are being too restrictive with + # the parameterization in `fullgraph=False` so as to enable `fullgraph=True`. + + # priority: `generation_config` argument > `model.generation_config` (the default generation config) + if generation_config is None: + # legacy: users may modify the model configuration to control generation. To trigger this legacy behavior, + # three conditions must be met + # 1) the generation config must have been created from the model config (`_from_model_config` field); + # 2) the generation config must have seen no modification since its creation (the hash is the same); + # 3) the user must have set generation parameters in the model config. + # NOTE: `torch.compile` can't compile `hash`, this legacy support is disabled with compilation. + if ( + not is_torchdynamo_compiling() + and self.generation_config._from_model_config + and self.generation_config._original_object_hash == hash(self.generation_config) + and self.config._has_non_default_generation_parameters() + ): + new_generation_config = GenerationConfig.from_model_config(self.config) + if new_generation_config != self.generation_config: + warnings.warn( + "You have modified the pretrained model configuration to control generation. This is a" + " deprecated strategy to control generation and will be removed soon, in a future version." + " Please use and modify the model generation configuration (see" + " https://huggingface.co/docs/transformers/generation_strategies#default-text-generation-configuration )" + ) + self.generation_config = new_generation_config + generation_config = self.generation_config + + # `torch.compile` can't compile `copy.deepcopy`, arguments in `kwargs` that are part of `generation_config` + # will mutate the object with `.update`. As such, passing these arguments through `kwargs` is disabled. + if is_torchdynamo_compiling(): + model_kwargs = kwargs + generate_attributes_in_kwargs = [ + key for key, value in kwargs.items() if getattr(generation_config, key, None) != value + ] + if len(generate_attributes_in_kwargs) > 0: + raise ValueError( + "`torch.compile` exception: all generation configuration attributes must be passed within a " + f"`generation_config` instance passed to `generate` (found: {generate_attributes_in_kwargs})." 
+ ) + else: + generation_config = copy.deepcopy(generation_config) + model_kwargs = generation_config.update(**kwargs) + + return generation_config, model_kwargs + + @torch.no_grad() + def generate( + self, + inputs: Optional[torch.Tensor] = None, + generation_config: Optional[GenerationConfig] = None, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, + synced_gpus: Optional[bool] = None, + assistant_model: Optional["PreTrainedModel"] = None, + streamer: Optional["BaseStreamer"] = None, + negative_prompt_ids: Optional[torch.Tensor] = None, + negative_prompt_attention_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> Union[GenerateOutput, torch.LongTensor]: + r""" + + Generates sequences of token ids for models with a language modeling head. + + + + Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the + model's default generation configuration. You can override any `generation_config` by passing the corresponding + parameters to generate(), e.g. `.generate(inputs, num_beams=4, do_sample=True)`. + + For an overview of generation strategies and code examples, check out the [following + guide](../generation_strategies). + + + + Parameters: + inputs (`torch.Tensor` of varying shape depending on the modality, *optional*): + The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the + method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs` + should be in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of + `input_ids`, `input_values`, `input_features`, or `pixel_values`. + generation_config (`~generation.GenerationConfig`, *optional*): + The generation configuration to be used as base parametrization for the generation call. `**kwargs` + passed to generate matching the attributes of `generation_config` will override them. If + `generation_config` is not provided, the default will be used, which has the following loading + priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model + configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s + default values, whose documentation should be checked to parameterize generation. + logits_processor (`LogitsProcessorList`, *optional*): + Custom logits processors that complement the default logits processors built from arguments and + generation config. If a logit processor is passed that is already created with the arguments or a + generation config an error is thrown. This feature is intended for advanced users. + stopping_criteria (`StoppingCriteriaList`, *optional*): + Custom stopping criteria that complements the default stopping criteria built from arguments and a + generation config. If a stopping criteria is passed that is already created with the arguments or a + generation config an error is thrown. If your stopping criteria depends on the `scores` input, make + sure you pass `return_dict_in_generate=True, output_scores=True` to `generate`. This feature is + intended for advanced users. + prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*): + If provided, this function constrains the beam search to allowed tokens only at each step. If not + provided, no constraint is applied. 
This function takes 2 arguments: the batch ID `batch_id` and + `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned + on the batch ID `batch_id` and the previously generated tokens `input_ids`. This argument is useful + for constrained generation conditioned on the prefix, as described in [Autoregressive Entity + Retrieval](https://arxiv.org/abs/2010.00904). + synced_gpus (`bool`, *optional*): + Whether to continue running the while loop until max_length. Unless overridden, this flag will be set to + `True` in a DeepSpeed ZeRO Stage 3 multi-GPU environment to avoid hanging if one GPU finishes + generating before the others. Otherwise it'll be set to `False`. + assistant_model (`PreTrainedModel`, *optional*): + An assistant model that can be used to accelerate generation. The assistant model must have the exact + same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistant model + is much faster than running generation with the model you're calling generate from. As such, the + assistant model should be much smaller. + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + negative_prompt_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + The negative prompt needed for some processors such as CFG. The batch size must match the input batch + size. This is an experimental feature, subject to breaking API changes in future versions. + negative_prompt_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Attention mask for `negative_prompt_ids`. + kwargs (`Dict[str, Any]`, *optional*): + Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be + forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder + specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*. + + Return: + [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True` + or when `config.return_dict_in_generate=True`) or a `torch.LongTensor`. + + If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.GenerateDecoderOnlyOutput`], + - [`~generation.GenerateBeamDecoderOnlyOutput`] + + If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible + [`~utils.ModelOutput`] types are: + + - [`~generation.GenerateEncoderDecoderOutput`], + - [`~generation.GenerateBeamEncoderDecoderOutput`] + """ + # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call + self._validate_model_class() + generation_config, model_kwargs = self._prepare_generation_config(generation_config, **kwargs) + self._validate_model_kwargs(model_kwargs.copy()) + + # 2. 
Set generation parameters if not already defined + if synced_gpus is None: + if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1: + synced_gpus = True + else: + synced_gpus = False + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + + if generation_config.pad_token_id is None and generation_config.eos_token_id is not None: + if model_kwargs.get("attention_mask", None) is None: + logger.warning( + "The attention mask and the pad token id were not set. As a consequence, you may observe " + "unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results." + ) + eos_token_id = generation_config.eos_token_id + if isinstance(eos_token_id, list): + eos_token_id = eos_token_id[0] + logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") + generation_config.pad_token_id = eos_token_id + + # 3. Define model inputs + # inputs_tensor has to be defined + # model_input_name is defined if model-specific keyword input is passed + # otherwise model_input_name is None + # all model-specific keyword inputs are removed from `model_kwargs` + inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs( + inputs, generation_config.bos_token_id, model_kwargs + ) + batch_size = inputs_tensor.shape[0] + + # 4. Define other model kwargs + model_kwargs["output_attentions"] = generation_config.output_attentions + model_kwargs["output_hidden_states"] = generation_config.output_hidden_states + # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are + # generating the first new token or not, and we only want to use the embeddings for the first new token) + if not self.config.is_encoder_decoder and model_input_name == "inputs_embeds": + model_kwargs["use_cache"] = True + else: + model_kwargs["use_cache"] = generation_config.use_cache + + accepts_attention_mask = "attention_mask" in set(inspect.signature(self.forward).parameters.keys()) + requires_attention_mask = "encoder_outputs" not in model_kwargs + + if model_kwargs.get("attention_mask", None) is None and requires_attention_mask and accepts_attention_mask: + model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( + inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id + ) + + # decoder-only models should use left-padding for generation + if not self.config.is_encoder_decoder: + # If `input_ids` was given, check if the last id in any sequence is `pad_token_id` + # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off. + if ( + generation_config.pad_token_id is not None + and len(inputs_tensor.shape) == 2 + and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0 + ): + logger.warning( + "A decoder-only architecture is being used, but right-padding was detected! For correct " + "generation results, please set `padding_side='left'` when initializing the tokenizer." + ) + + if self.config.is_encoder_decoder and "encoder_outputs" not in model_kwargs: + # if model is encoder decoder encoder_outputs are created + # and added to `model_kwargs` + model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( + inputs_tensor, model_kwargs, model_input_name + ) + + # 5. 
Prepare `input_ids` which will be used for auto-regressive generation + if self.config.is_encoder_decoder: + input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation( + batch_size=batch_size, + model_input_name=model_input_name, + model_kwargs=model_kwargs, + decoder_start_token_id=generation_config.decoder_start_token_id, + bos_token_id=generation_config.bos_token_id, + device=inputs_tensor.device, + ) + else: + input_ids = inputs_tensor if model_input_name == "input_ids" else model_kwargs.pop("input_ids") + + if streamer is not None: + streamer.put(input_ids.cpu()) + + # 6. Prepare `max_length` depending on other stopping criteria. + input_ids_length = input_ids.shape[-1] + has_default_max_length = kwargs.get("max_length") is None and generation_config.max_length is not None + has_default_min_length = kwargs.get("min_length") is None and generation_config.min_length is not None + generation_config = self._prepare_generated_length( + generation_config=generation_config, + has_default_max_length=has_default_max_length, + has_default_min_length=has_default_min_length, + model_input_name=model_input_name, + inputs_tensor=inputs_tensor, + input_ids_length=input_ids_length, + ) + + if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING: + if generation_config.cache_implementation == "static": + if model_kwargs.get("past_key_values", False) is not False: + raise ValueError( + "Using `past_key_values` argument with `generate()` when using a static KV cache is not supported. Please open an issue in Transformers GitHub repository." + ) + cache_cls = NEED_SETUP_CACHE_CLASSES_MAPPING["static"] + if not callable(getattr(self, "_setup_cache", None)): + raise ValueError( + "The `generation_config` defines a `cache_implementation` that is not compatible with this model." + " Make sure it has a `_setup_cache` function." + ) + self._setup_cache(cache_cls, max_batch_size=batch_size, max_cache_len=generation_config.max_length) + + self._validate_generated_length(generation_config, input_ids_length, has_default_max_length) + + # 7. determine generation mode + generation_mode = generation_config.get_generation_mode(assistant_model) + + if streamer is not None and (generation_config.num_beams > 1): + raise ValueError( + "`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1." + ) + + if self.device.type != input_ids.device.type: + warnings.warn( + "You are calling .generate() with the `input_ids` being on a device type different" + f" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model" + f" is on {self.device.type}. You may experience unexpected behaviors or slower generation." + " Please make sure that you have put `input_ids` to the" + f" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before" + " running `.generate()`.", + UserWarning, + ) + + # 8. prepare distribution pre_processing samplers + prepared_logits_processor = self._get_logits_processor( + generation_config=generation_config, + input_ids_seq_length=input_ids_length, + encoder_input_ids=inputs_tensor, + prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, + logits_processor=logits_processor, + model_kwargs=model_kwargs, + negative_prompt_ids=negative_prompt_ids, + negative_prompt_attention_mask=negative_prompt_attention_mask, + ) + + # 9. 
prepare stopping criteria + prepared_stopping_criteria = self._get_stopping_criteria( + generation_config=generation_config, stopping_criteria=stopping_criteria + ) + # 10. go into different generation modes + if generation_mode == GenerationMode.ASSISTED_GENERATION: + if generation_config.num_return_sequences > 1: + raise ValueError( + "num_return_sequences has to be 1 when doing assisted generate, " + f"but is {generation_config.num_return_sequences}." + ) + if batch_size > 1: + raise ValueError("assisted generate is only supported for batch_size = 1") + if not model_kwargs["use_cache"]: + raise ValueError("assisted generate requires `use_cache=True`") + + # 11. Get the candidate generator, given the parameterization + candidate_generator = self._get_candidate_generator( + generation_config=generation_config, + input_ids=input_ids, + inputs_tensor=inputs_tensor, + assistant_model=assistant_model, + logits_processor=logits_processor, + model_kwargs=model_kwargs, + ) + + # 12. run assisted generate + result = self._assisted_decoding( + input_ids, + candidate_generator=candidate_generator, + do_sample=generation_config.do_sample, + logits_processor=prepared_logits_processor, + logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + if generation_mode == GenerationMode.GREEDY_SEARCH: + # 11. run greedy search + result = self._greedy_search( + input_ids, + logits_processor=prepared_logits_processor, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH: + if not model_kwargs["use_cache"]: + raise ValueError("Contrastive search requires `use_cache=True`") + + result = self._contrastive_search( + input_ids, + top_k=generation_config.top_k, + penalty_alpha=generation_config.penalty_alpha, + logits_processor=prepared_logits_processor, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + sequential=generation_config.low_memory, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.SAMPLE: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config) + + # 12. expand input_ids with `num_return_sequences` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_return_sequences, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 13. 
run sample + result = self._sample( + input_ids, + logits_processor=prepared_logits_processor, + logits_warper=logits_warper, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + streamer=streamer, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.BEAM_SEARCH: + # 11. prepare beam search scorer + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + # 12. interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + # 13. run beam search + result = self._beam_search( + input_ids, + beam_scorer, + logits_processor=prepared_logits_processor, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + sequential=generation_config.low_memory, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.BEAM_SAMPLE: + # 11. prepare logits warper + logits_warper = self._get_logits_warper(generation_config) + + # 12. prepare beam search scorer + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + + # 13. interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + + # 14. run beam sample + result = self._beam_sample( + input_ids, + beam_scorer, + logits_processor=prepared_logits_processor, + logits_warper=logits_warper, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH: + # 11. prepare beam search scorer + beam_scorer = BeamSearchScorer( + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + num_beam_groups=generation_config.num_beam_groups, + max_length=generation_config.max_length, + ) + # 12. 
interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + # 13. run beam search + result = self._group_beam_search( + input_ids, + beam_scorer, + logits_processor=prepared_logits_processor, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH: + final_constraints = [] + if generation_config.constraints is not None: + final_constraints = generation_config.constraints + + if generation_config.force_words_ids is not None: + + def typeerror(): + raise ValueError( + "`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]` " + f"of positive integers, but is {generation_config.force_words_ids}." + ) + + if ( + not isinstance(generation_config.force_words_ids, list) + or len(generation_config.force_words_ids) == 0 + ): + typeerror() + + for word_ids in generation_config.force_words_ids: + if isinstance(word_ids[0], list): + if not isinstance(word_ids, list) or len(word_ids) == 0: + typeerror() + if any(not isinstance(token_ids, list) for token_ids in word_ids): + typeerror() + if any( + any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) + for token_ids in word_ids + ): + typeerror() + + constraint = DisjunctiveConstraint(word_ids) + else: + if not isinstance(word_ids, list) or len(word_ids) == 0: + typeerror() + if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids): + typeerror() + + constraint = PhrasalConstraint(word_ids) + final_constraints.append(constraint) + + # 11. prepare beam search scorer + constrained_beam_scorer = ConstrainedBeamSearchScorer( + constraints=final_constraints, + batch_size=batch_size, + num_beams=generation_config.num_beams, + device=inputs_tensor.device, + length_penalty=generation_config.length_penalty, + do_early_stopping=generation_config.early_stopping, + num_beam_hyps_to_keep=generation_config.num_return_sequences, + max_length=generation_config.max_length, + ) + # 12. interleave input_ids with `num_beams` additional sequences per batch + input_ids, model_kwargs = self._expand_inputs_for_generation( + input_ids=input_ids, + expand_size=generation_config.num_beams, + is_encoder_decoder=self.config.is_encoder_decoder, + **model_kwargs, + ) + # 13. run beam search + result = self._constrained_beam_search( + input_ids, + constrained_beam_scorer=constrained_beam_scorer, + logits_processor=prepared_logits_processor, + stopping_criteria=prepared_stopping_criteria, + pad_token_id=generation_config.pad_token_id, + output_scores=generation_config.output_scores, + output_logits=generation_config.output_logits, + return_dict_in_generate=generation_config.return_dict_in_generate, + synced_gpus=synced_gpus, + **model_kwargs, + ) + + if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING: + if not callable(getattr(self, "_reset_cache", None)): + raise ValueError( + "A `static_cache` was used to generate but there was a failure when trying to release the cache. " + " Make sure this model implements a `_reset_cache` function." 
+ ) + self._reset_cache() + + return result + + def _has_unfinished_sequences(self, this_peer_finished: bool, synced_gpus: bool, device: torch.device) -> bool: + """ + Returns whether there are still unfinished sequences in the device. The existence of unfinished sequences is + fed through `this_peer_finished`. ZeRO stage 3-friendly. + """ + if synced_gpus: + # Under synced_gpus the `forward` call must continue until all gpus complete their sequence. + # The following logic allows an early break if all peers finished generating their sequence + this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0).to(device) + # send 0.0 if we finished, 1.0 otherwise + dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM) + # did all peers finish? the reduced sum will be 0.0 then + if this_peer_finished_flag.item() == 0.0: + return False + elif this_peer_finished: + return False + return True + + def contrastive_search(self, *args, **kwargs): + logger.warning_once( + "Calling `contrastive_search` directly is deprecated and will be removed in v4.41. Use `generate` or a " + "custom generation loop instead.", + ) + return self._contrastive_search(*args, **kwargs) + + @torch.no_grad() + def _contrastive_search( + self, + input_ids: torch.LongTensor, + top_k: Optional[int] = 1, + penalty_alpha: Optional[float] = 0, + logits_processor: Optional[LogitsProcessorList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + sequential: Optional[bool] = None, + **model_kwargs, + ) -> Union[GenerateNonBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **contrastive search** and can + be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._contrastive_search`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + top_k (`int`, *optional*, defaults to 1): + The size of the candidate set that is used to re-rank for contrastive search + penalty_alpha (`float`, *optional*, defaults to 0): + The degeneration penalty for contrastive search; activate when it is larger than 0 + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. 
List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. See `logits` under returned tensors + for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + sequential (`bool`, *optional*): + Switches topk hidden state computation from parallel to sequential to reduce memory if True. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `forward` function of the model. + If model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] + or `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("facebook/opt-125m") + >>> model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m") + >>> # set pad_token_id to eos_token_id because OPT does not have a PAD token + >>> model.config.pad_token_id = model.config.eos_token_id + >>> input_prompt = "DeepMind Company is" + >>> input_ids = tokenizer(input_prompt, return_tensors="pt") + >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=64)]) + >>> outputs = model._contrastive_search( + ... **input_ids, penalty_alpha=0.6, top_k=4, stopping_criteria=stopping_criteria + ... ) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['DeepMind Company is a company that focuses on the development and commercialization of artificial intelligence (AI). 
DeepMind’s mission is to help people understand and solve problems that are difficult to solve in the world today.\n\nIn this post, we talk about the benefits of deep learning in business and how it'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + if eos_token_id is not None: + logger.warning_once( + "`eos_token_id` is deprecated in this function and will be removed in v4.41, use" + " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead." + " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + sequential = sequential if sequential is not None else self.generation_config.low_memory + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_logits = output_logits if output_logits is not None else self.generation_config.output_logits + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + raw_logits = () if (return_dict_in_generate and output_logits) else None + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + batch_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) 
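+        # Illustrative sketch (comments only, not executed): the loop below keeps decoding while any row of
+        # `unfinished_sequences` is still 1 (and, under `synced_gpus`, while any peer reports unfinished work).
+        # With made-up values eos_token_id=2 and pad_token_id=0, the masking applied further down behaves like:
+        #     unfinished_sequences = torch.tensor([1, 0])   # second row already produced EOS
+        #     next_tokens = torch.tensor([17, 9])
+        #     next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)  # -> tensor([17, 0])
+        # so finished rows keep emitting the pad token while the remaining rows continue to decode.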
+ + this_peer_finished = False + + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + # if the first step in the loop, encode all the prefix and obtain: (1) past_key_values; + # (2) last_hidden_states; (3) logit_for_next_step; (4) update model kwargs for the next step + if model_kwargs.get("past_key_values") is None: + # prepare inputs + model_kwargs["use_cache"] = True + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # encode the given prefix and prepare model inputs; encoder-decoder model process the prefix and save + # the `encoder_outputs` + outputs = self( + **model_inputs, return_dict=True, output_hidden_states=True, output_attentions=output_attentions + ) + + # last decoder hidden states will be used to compute the degeneration penalty (cosine similarity with + # previous tokens) + if self.config.is_encoder_decoder: + last_hidden_states = outputs.decoder_hidden_states[-1] + else: + last_hidden_states = outputs.hidden_states[-1] + + # next logit for contrastive search to select top-k candidate tokens + logit_for_next_step = outputs.logits[:, -1, :] + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + standardize_cache_format=True, + ) + if not sequential: + # Expands model inputs top_k times, for batched forward passes (akin to beam search). + _, model_kwargs = self._expand_inputs_for_generation( + expand_size=top_k, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs + ) + + past_key_values = model_kwargs.get("past_key_values") + if past_key_values is None: + raise ValueError( + f"{self.__class__.__name__} does not support caching and therefore **can't** be used " + "for contrastive search." + ) + elif ( + not isinstance(past_key_values[0], (tuple, torch.Tensor)) + or past_key_values[0][0].shape[0] != batch_size + ): + raise ValueError( + f"{self.__class__.__name__} does not have a standard cache format and therefore **can't** be " + "used for contrastive search without further modifications." 
+ ) + + # contrastive_search main logic start: + # contrastive search decoding consists of two steps: (1) candidate tokens recall; (2) candidate re-rank by + # degeneration penalty + processed_logit_for_next_step = logits_processor(input_ids, logit_for_next_step) + processed_logit_for_next_step = logits_warper(input_ids, processed_logit_for_next_step) + next_probs = nn.functional.softmax(processed_logit_for_next_step, dim=-1) + + top_k_probs, top_k_ids = torch.topk(next_probs, dim=-1, k=top_k) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_logits: + raw_logits += (logit_for_next_step,) + if output_scores: + scores += (processed_logit_for_next_step,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # Replicates the new past_key_values to match the `top_k` candidates + new_key_values = [] + past = model_kwargs["past_key_values"] + for layer in past: + items = [] + # item is either the key or the value matrix + for item in layer: + if sequential: + items.append(item.repeat_interleave(1, dim=0)) + else: + items.append(item.repeat_interleave(top_k, dim=0)) + new_key_values.append(tuple(items)) + if not isinstance(past, DynamicCache): + past = tuple(new_key_values) + else: + for layer_idx in range(len(new_key_values)): + past.key_cache[layer_idx] = new_key_values[layer_idx][0] + past.value_cache[layer_idx] = new_key_values[layer_idx][1] + model_kwargs["past_key_values"] = past + + if sequential: + all_outputs = [] + for i in range(top_k): + # compute the candidate tokens by the language model and collect their hidden_states + next_model_inputs = self.prepare_inputs_for_generation(top_k_ids[:, i].view(-1, 1), **model_kwargs) + + outputs = self( + **next_model_inputs, + return_dict=True, + output_hidden_states=True, + output_attentions=output_attentions, + ) + all_outputs.append(outputs) + outputs = stack_model_outputs(all_outputs) + + else: + # compute the candidate tokens by the language model and collect their hidden_states + # assembles top_k_ids into batch of size k + next_model_inputs = self.prepare_inputs_for_generation(top_k_ids.view(-1, 1), **model_kwargs) + + outputs = self( + **next_model_inputs, + return_dict=True, + output_hidden_states=True, + output_attentions=output_attentions, + ) + # name is different for encoder-decoder and decoder-only models + if self.config.is_encoder_decoder: + next_hidden = outputs.decoder_hidden_states[-1] + full_hidden_states = outputs.decoder_hidden_states + else: + next_hidden = outputs.hidden_states[-1] + full_hidden_states = outputs.hidden_states + + logits = outputs.logits[:, -1, :] + + context_hidden = last_hidden_states.repeat_interleave(top_k, dim=0) + + # compute the degeneration penalty and re-rank the candidates based on the degeneration penalty and the + # model confidence. Keeping `selected_idx` on CPU enables multi-device contrastive search and doesn't + # introduce (noticeable) slowdowns on single-device runs. 
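+            # Illustrative sketch (comments only; `_ranking_fast` below is what actually runs): each of the
+            # top-k candidates is scored by model confidence minus an alpha-weighted degeneration penalty,
+            # roughly equivalent to
+            #     norm_ctx = context_hidden / context_hidden.norm(dim=2, keepdim=True)   # (batch*top_k, seq, hidden)
+            #     norm_next = next_hidden / next_hidden.norm(dim=2, keepdim=True)        # (batch*top_k, 1, hidden)
+            #     cosine = torch.bmm(norm_ctx, norm_next.transpose(1, 2)).squeeze(-1)    # (batch*top_k, seq)
+            #     penalty = cosine.max(dim=-1).values                                    # (batch*top_k,)
+            #     score = (1 - penalty_alpha) * top_k_probs.view(-1) - penalty_alpha * penalty
+            #     selected_idx = torch.stack(torch.split(score, top_k)).argmax(dim=-1)   # (batch,)
+            # and the highest-scoring candidate per batch row is kept.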
+ selected_idx = _ranking_fast(context_hidden, next_hidden, top_k_probs, penalty_alpha, top_k) + selected_idx = selected_idx.to("cpu") + + # prepare for the next step: (1) next token_id; (2) past_key_values; (3) last_hidden_states for computing + # the degeneration penalty; (4) logits for selecting next top-k candidates; (5) selected tokens scores + # (model confidence minus degeneration penalty); (6) decoder hidden_states + next_tokens = top_k_ids[range(len(top_k_ids)), selected_idx] + next_hidden = torch.stack(torch.split(next_hidden.squeeze(dim=1), top_k)) + next_hidden = next_hidden[range(batch_size), selected_idx, :] + last_hidden_states = torch.cat([last_hidden_states, next_hidden.unsqueeze(1)], dim=1) + + next_decoder_hidden_states = () + for layer in full_hidden_states: + layer = torch.stack(torch.split(layer, top_k))[range(batch_size), selected_idx, :] + next_decoder_hidden_states += (layer,) + + # generate past_key_values cache of only the selected token + if sequential: + next_model_input = self.prepare_inputs_for_generation( + top_k_ids[:, selected_idx].view(-1, 1), **model_kwargs + ) + + selected_outputs = self( + **next_model_input, + return_dict=True, + output_hidden_states=False, + output_attentions=False, + ) + next_past_key_values = selected_outputs["past_key_values"] + + else: + next_past_key_values = self._extract_past_from_model_output(outputs, standardize_cache_format=True) + new_key_values = [] + for layer in next_past_key_values: + items = [] + # item is either the key or the value matrix + for item in layer: + item = torch.stack(torch.split(item, top_k, dim=0)) # [B, K, num_head, seq_len, esz] + item = item[range(batch_size), selected_idx, ...] # [B, num_head, seq_len, esz] + items += [item] + new_key_values += [items] + + if not isinstance(next_past_key_values, DynamicCache): + next_past_key_values = tuple(new_key_values) + else: + for layer_idx in range(len(new_key_values)): + next_past_key_values.key_cache[layer_idx] = new_key_values[layer_idx][0] + next_past_key_values.value_cache[layer_idx] = new_key_values[layer_idx][1] + + logit_for_next_step = torch.stack(torch.split(logits, top_k))[range(batch_size), selected_idx, :] + + # Rebuilds the relevant parts of the model output for the selected token, for use in the next iteration + if self.config.is_encoder_decoder: + next_step_cross_attentions = () + next_step_decoder_attentions = () + if output_attentions: + for layer in outputs.cross_attentions: + layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] + next_step_cross_attentions += (layer,) + for layer in outputs.decoder_attentions: + layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] + next_step_decoder_attentions += (layer,) + outputs = Seq2SeqLMOutput( + past_key_values=next_past_key_values, + decoder_hidden_states=next_decoder_hidden_states, + decoder_attentions=next_step_decoder_attentions or None, + cross_attentions=next_step_cross_attentions or None, + ) + else: + next_step_attentions = () + if output_attentions: + for layer in outputs.attentions: + layer = torch.stack(torch.split(layer, top_k, dim=0))[range(batch_size), selected_idx, ...] 
+ next_step_attentions += (layer,) + outputs = CausalLMOutputWithPast( + past_key_values=next_past_key_values, + hidden_states=next_decoder_hidden_states, + attentions=next_step_attentions or None, + ) + # contrastive_search main logic end + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + # finished sentences should have their next token be a padding token + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + + # stop when each sentence is finished + unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores) + this_peer_finished = unfinished_sequences.max() == 0 + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + # Contrastive search works by forward looking at the next token, so we need to exclude it from + # `past_key_values` to be consistent with the other decoding methods + if model_kwargs.get("past_key_values") is not None: + past_key_values = [] + for layer in model_kwargs["past_key_values"]: + layer_past_key_values = [] + for item in layer: + layer_past_key_values.append(item[..., :-1, :]) + past_key_values.append(tuple(layer_past_key_values)) + model_kwargs["past_key_values"] = tuple(past_key_values) + + if self.config.is_encoder_decoder: + return GenerateEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + def greedy_search(self, *args, **kwargs): + logger.warning_once( + "Calling `greedy_search` directly is deprecated and will be removed in v4.41. 
Use `generate` or a " + "custom generation loop instead.", + ) + return self._greedy_search(*args, **kwargs) + + def _greedy_search( + self, + input_ids: torch.LongTensor, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + **model_kwargs, + ) -> Union[GenerateNonBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be + used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._greedy_search`] directly. Use generate() + instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. See `logits` under returned tensors + for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. 
Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `forward` function of the model. + If model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... ) + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + + >>> input_prompt = "It might be possible to" + >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... ) + >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) + + >>> outputs = model._greedy_search( + ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria + ... ) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ["It might be possible to get a better understanding of the nature of the problem, but it's not"] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + if eos_token_id is not None: + logger.warning_once( + "`eos_token_id` is deprecated in this function and will be removed in v4.41, use" + " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead." 
+ " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + raw_logits = () if (return_dict_in_generate and output_logits) else None + scores = () if (return_dict_in_generate and output_scores) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + batch_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + this_peer_finished = False + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) + + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + # prepare model inputs + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_tokens_scores = logits_processor(input_ids, next_token_logits) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_tokens_scores,) + if output_logits: + raw_logits += (next_token_logits,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if 
self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # argmax + next_tokens = torch.argmax(next_tokens_scores, dim=-1) + + # finished sentences should have their next token be a padding token + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + + unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores) + this_peer_finished = unfinished_sequences.max() == 0 + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GenerateEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + def sample(self, *args, **kwargs): + logger.warning_once( + "Calling `sample` directly is deprecated and will be removed in v4.41. Use `generate` or a " + "custom generation loop instead.", + ) + return self._sample(*args, **kwargs) + + def _sample( + self, + input_ids: torch.LongTensor, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + **model_kwargs, + ) -> Union[GenerateNonBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **multinomial sampling** and + can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._sample`] directly. Use generate() instead. + For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. 
List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for + more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + model_kwargs: + Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is + an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or `torch.LongTensor`: + A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... TopKLogitsWarper, + ... TemperatureLogitsWarper, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... 
)
+ >>> import torch
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
+ >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
+
+ >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token
+ >>> model.config.pad_token_id = model.config.eos_token_id
+ >>> model.generation_config.pad_token_id = model.config.eos_token_id
+
+ >>> input_prompt = "Today is a beautiful day, and"
+ >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids
+
+ >>> # instantiate logits processors
+ >>> logits_processor = LogitsProcessorList(
+ ... [
+ ... MinLengthLogitsProcessor(15, eos_token_id=model.generation_config.eos_token_id),
+ ... ]
+ ... )
+ >>> # instantiate logits warpers
+ >>> logits_warper = LogitsProcessorList(
+ ... [
+ ... TopKLogitsWarper(50),
+ ... TemperatureLogitsWarper(0.7),
+ ... ]
+ ... )
+
+ >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
+
+ >>> torch.manual_seed(0) # doctest: +IGNORE_RESULT
+ >>> outputs = model._sample(
+ ... input_ids,
+ ... logits_processor=logits_processor,
+ ... logits_warper=logits_warper,
+ ... stopping_criteria=stopping_criteria,
+ ... )
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Today is a beautiful day, and we must do everything possible to make it a day of celebration.']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList()
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ if eos_token_id is not None:
+ logger.warning_once(
+ "`eos_token_id` is deprecated in this function and will be removed in v4.41, use"
+ " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead."
+ " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_logits = output_logits if output_logits is not None else self.generation_config.output_logits + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + batch_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + this_peer_finished = False + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) + + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + # prepare model inputs + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # forward pass to get next token + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + + # pre-process distribution + next_token_scores = logits_processor(input_ids, next_token_logits) + next_token_scores = logits_warper(input_ids, next_token_scores) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores,) + if output_logits: + raw_logits += (next_token_logits,) + 
if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # sample + probs = nn.functional.softmax(next_token_scores, dim=-1) + next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) + + # finished sentences should have their next token be a padding token + if eos_token_id is not None: + if pad_token_id is None: + raise ValueError("If `eos_token_id` is defined, make sure that `pad_token_id` is defined.") + next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences) + + # update generated ids, model inputs, and length for next step + input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) + if streamer is not None: + streamer.put(next_tokens.cpu()) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + + unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores) + this_peer_finished = unfinished_sequences.max() == 0 + + if streamer is not None: + streamer.end() + + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GenerateEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + def _temporary_reorder_cache(self, past_key_values, beam_idx): + """ + Temporary function to handle the different types of cache reordering processes while we roll out `Cache`. + + TODO: standardize cache formats and make all models compatible with `Cache`. It would remove the need + for this function, with `Cache.reorder_cache` being the sole remaining code path + """ + model_class = self.__class__.__name__.lower() + # Exception 1: code path for models using the legacy cache format + if isinstance(past_key_values, (tuple, list)): + past_key_values = self._reorder_cache(past_key_values, beam_idx) + # Exception 2: models with different cache formats. These are limited to `DynamicCache` until their + # cache format is standardized, to avoid adding complexity to the codebase. + elif "bloom" in model_class or "gptbigcode" in model_class: + if not isinstance(past_key_values, DynamicCache): + raise ValueError( + f"Using an unsupported cache format with {model_class}. 
Currently, it only supports the " + "legacy tuple format or `DynamicCache`" + ) + past_key_values = self._reorder_cache(past_key_values, beam_idx) + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + # Standard code path: use the `Cache.reorder_cache` + else: + past_key_values.reorder_cache(beam_idx) + return past_key_values + + def beam_search(self, *args, **kwargs): + logger.warning_once( + "Calling `beam_search` directly is deprecated and will be removed in v4.41. Use `generate` or a " + "custom generation loop instead.", + ) + return self._beam_search(*args, **kwargs) + + def _beam_search( + self, + input_ids: torch.LongTensor, + beam_scorer: BeamScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + sequential: Optional[bool] = None, + **model_kwargs, + ) -> Union[GenerateBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **beam search decoding** and + can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._beam_search`] directly. Use generate() + instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + beam_scorer (`BeamScorer`): + An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. 
See `logits` under returned tensors for + more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + sequential (`bool`, defaults to `False`): + By default, beam search has `batch_size * num_beams` as effective batch size (see `beam_search()` for + more details). This flag will avoid parallelizing the beam search and will instead run beam search + sequentially. + model_kwargs: + Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is + an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForSeq2SeqLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... BeamSearchScorer, + ... ) + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids + + + >>> # lets run beam search using 3 beams + >>> num_beams = 3 + >>> # define decoder start token ids + >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) + >>> input_ids = input_ids * model.config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> model_kwargs = { + ... "encoder_outputs": model.get_encoder()( + ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True + ... ) + ... } + + >>> # instantiate beam scorer + >>> beam_scorer = BeamSearchScorer( + ... batch_size=1, + ... num_beams=num_beams, + ... device=model.device, + ... ) + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), + ... ] + ... 
)
+
+ >>> outputs = model._beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs)
+
+ >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)
+ ['Wie alt bist du?']
+ ```"""
+ # init values
+ logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
+ stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()
+ sequential = sequential if sequential is not None else self.generation_config.low_memory
+ if max_length is not None:
+ warnings.warn(
+ "`max_length` is deprecated in this function, use"
+ " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.",
+ UserWarning,
+ )
+ stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)
+ if len(stopping_criteria) == 0:
+ warnings.warn("You have not defined any stopping_criteria; this will likely loop forever", UserWarning)
+ pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id
+ if eos_token_id is not None:
+ logger.warning_once(
+ "`eos_token_id` is deprecated in this function and will be removed in v4.41, use"
+ " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead."
+ " Otherwise make sure to set `model.generation_config.eos_token_id`",
+ FutureWarning,
+ )
+ stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id))
+ else:
+ # TODO remove when the method is totally private and beam scorer refactored
+ # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever
+ eos_token_id = [
+ criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id")
+ ]
+ eos_token_id = eos_token_id[0] if eos_token_id else None
+ if eos_token_id is None and self.generation_config.eos_token_id is not None:
+ eos_token_id = self.generation_config.eos_token_id
+ stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id))
+
+ if isinstance(eos_token_id, int):
+ eos_token_id = [eos_token_id]
+ output_scores = output_scores if output_scores is not None else self.generation_config.output_scores
+ output_logits = output_logits if output_logits is not None else self.generation_config.output_logits
+ output_attentions = (
+ output_attentions if output_attentions is not None else self.generation_config.output_attentions
+ )
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states
+ )
+ return_dict_in_generate = (
+ return_dict_in_generate
+ if return_dict_in_generate is not None
+ else self.generation_config.return_dict_in_generate
+ )
+
+ batch_size = len(beam_scorer._beam_hyps)
+ num_beams = beam_scorer.num_beams
+
+ batch_beam_size, cur_len = input_ids.shape
+ if "inputs_embeds" in model_kwargs:
+ cur_len = model_kwargs["inputs_embeds"].shape[1]
+ model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device)
+
+ if num_beams * batch_size != batch_beam_size:
+ raise ValueError(
+ f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
+ ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + beam_indices = ( + tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None + ) + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens + # of the first beam are considered to avoid sampling the exact same tokens across all beams. + beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) + beam_scores[:, 1:] = -1e9 + beam_scores = beam_scores.view((batch_size * num_beams,)) + + this_peer_finished = False + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder + + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + # if sequential is True, split the input to batches of batch_size and run sequentially + if sequential: + if any( + model_name in self.__class__.__name__.lower() + for model_name in [ + "fsmt", + "reformer", + "bloom", + "ctrl", + "gpt_bigcode", + "transo_xl", + "xlnet", + "cpm", + "jamba", + ] + ): + raise RuntimeError( + f"Currently generation for {self.__class__.__name__} is not supported " + f"for `low_memory beam_search`. Please open an issue on GitHub if you need this feature." 
+ ) + + inputs_per_sub_batches = _split_model_inputs( + model_inputs, split_size=batch_size, full_batch_size=batch_beam_size + ) + outputs_per_sub_batch = [ + self( + **inputs_per_sub_batch, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + for inputs_per_sub_batch in inputs_per_sub_batches + ] + + outputs = stack_model_outputs(outputs_per_sub_batch) + + else: # Unchanged original behavior + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * num_beams, vocab_size) + + next_token_scores_processed = logits_processor(input_ids, next_token_scores) + next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as( + next_token_scores_processed + ) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores_processed,) + if output_logits: + raw_logits += (next_token_logits,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # reshape for beam search + vocab_size = next_token_scores.shape[-1] + next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + + # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam. 
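+ # Since `next_token_scores` was flattened to (batch_size, num_beams * vocab_size) above, each index returned
+ # by `torch.topk` below encodes both the beam it came from (index // vocab_size) and the token id
+ # (index % vocab_size). Keeping max(2, 1 + n_eos_tokens) * num_beams candidates ensures that enough non-EOS
+ # continuations survive to refill all `num_beams` beams, even if the top-scoring tokens are EOS.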
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0 + next_token_scores, next_tokens = torch.topk( + next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True + ) + + next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") + next_tokens = next_tokens % vocab_size + + # stateless + beam_outputs = beam_scorer.process( + input_ids, + next_token_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + beam_scores = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + if model_kwargs.get("past_key_values", None) is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], beam_idx + ) + + if return_dict_in_generate and output_scores: + beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) + + # increase cur_len + cur_len = cur_len + 1 + + if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)): + this_peer_finished = True + + sequence_outputs = beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + + if self.config.is_encoder_decoder: + return GenerateBeamEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateBeamDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def beam_sample(self, *args, **kwargs): + logger.warning_once( + "Calling `beam_sample` directly is deprecated and will be removed in v4.41. 
Use `generate` or a " + "custom generation loop instead.", + ) + return self._beam_sample(*args, **kwargs) + + def _beam_sample( + self, + input_ids: torch.LongTensor, + beam_scorer: BeamScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + **model_kwargs, + ) -> Union[GenerateBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **beam search multinomial + sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._beam_sample`] directly. Use generate() + instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + beam_scorer (`BeamScorer`): + A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. 
See `logits` under returned tensors for + more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + model_kwargs: + Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is + an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForSeq2SeqLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... TopKLogitsWarper, + ... TemperatureLogitsWarper, + ... BeamSearchScorer, + ... ) + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids + + >>> # lets run beam search using 3 beams + >>> num_beams = 3 + >>> # define decoder start token ids + >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) + >>> input_ids = input_ids * model.config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> model_kwargs = { + ... "encoder_outputs": model.get_encoder()( + ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True + ... ) + ... } + + >>> # instantiate beam scorer + >>> beam_scorer = BeamSearchScorer( + ... batch_size=1, + ... max_length=model.config.max_length, + ... num_beams=num_beams, + ... device=model.device, + ... ) + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id)] + ... ) + >>> # instantiate logits processors + >>> logits_warper = LogitsProcessorList( + ... [ + ... TopKLogitsWarper(50), + ... TemperatureLogitsWarper(0.7), + ... ] + ... ) + + >>> outputs = model._beam_sample( + ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs + ... 
) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt bist du?'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + if eos_token_id is not None: + logger.warning_once( + "`eos_token_id` is deprecated in this function and will be removed in v4.41, use" + " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead." + " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private and beam scorer refactored + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_logits = output_logits if output_logits is not None else self.generation_config.output_logits + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + batch_size = len(beam_scorer._beam_hyps) + num_beams = beam_scorer.num_beams + + batch_beam_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + beam_indices = ( + tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None + ) + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + 
encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) + beam_scores = beam_scores.view((batch_size * num_beams,)) + + this_peer_finished = False + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * num_beams, vocab_size) + + next_token_scores_processed = logits_processor(input_ids, next_token_scores) + next_token_scores_processed = logits_warper(input_ids, next_token_scores_processed) + next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as( + next_token_scores_processed + ) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores_processed,) + if output_logits: + raw_logits += (next_token_logits,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # reshape for beam search + vocab_size = next_token_scores.shape[-1] + next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + + probs = nn.functional.softmax(next_token_scores, dim=-1) + + next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) + next_token_scores = torch.gather(next_token_scores, -1, next_tokens) + + next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) + next_tokens = torch.gather(next_tokens, -1, _indices) + + next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") + next_tokens = next_tokens % vocab_size + + # stateless + beam_outputs = beam_scorer.process( + input_ids, + next_token_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + beam_scores = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + if model_kwargs.get("past_key_values", None) is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], beam_idx + ) + + if return_dict_in_generate and output_scores: + beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) + + # increase cur_len + cur_len = cur_len + 1 + + if 
beam_scorer.is_done or all(stopping_criteria(input_ids, scores)): + this_peer_finished = True + + sequence_outputs = beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + + if self.config.is_encoder_decoder: + return GenerateBeamEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateBeamDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def group_beam_search(self, *args, **kwargs): + logger.warning_once( + "Calling `group_beam_search` directly is deprecated and will be removed in v4.41. Use `generate` or a " + "custom generation loop instead.", + ) + return self._group_beam_search(*args, **kwargs) + + def _group_beam_search( + self, + input_ids: torch.LongTensor, + beam_scorer: BeamScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + **model_kwargs, + ): + r""" + Generates sequences of token ids for models with a language modeling head using **diverse beam search + decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._group_beam_search`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + beam_scorer (`BeamScorer`): + An derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation. For more information, the documentation of [`BeamScorer`] should be read. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. 
List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for + more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + + model_kwargs: + Additional model specific kwargs that will be forwarded to the `forward` function of the model. If + model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForSeq2SeqLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... HammingDiversityLogitsProcessor, + ... BeamSearchScorer, + ... ) + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids + + + >>> # lets run diverse beam search using 6 beams + >>> num_beams = 6 + >>> # define decoder start token ids + >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) + >>> input_ids = input_ids * model.config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> model_kwargs = { + ... "encoder_outputs": model.get_encoder()( + ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True + ... ) + ... } + + >>> # instantiate beam scorer + >>> beam_scorer = BeamSearchScorer( + ... batch_size=1, + ... max_length=model.config.max_length, + ... num_beams=num_beams, + ... device=model.device, + ... num_beam_groups=3, + ... 
) + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), + ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), + ... ] + ... ) + + >>> outputs = model._group_beam_search( + ... input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs + ... ) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt bist du?'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + if eos_token_id is not None: + logger.warning_once( + "`eos_token_id` is deprecated in this function and will be removed in v4.41, use" + " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead." + " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private and beam scorer refactored + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_logits = output_logits if output_logits is not None else self.generation_config.output_logits + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + num_beams = beam_scorer.num_beams + num_beam_groups = beam_scorer.num_beam_groups + num_sub_beams = num_beams // num_beam_groups + batch_size = len(beam_scorer._beam_hyps) // num_beam_groups + device = input_ids.device + + batch_beam_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) + + if return_dict_in_generate and output_scores: + beam_indices = [tuple(() for _ in range(num_sub_beams * batch_size)) for _ in range(num_beam_groups)] + else: + beam_indices = None + + if num_beams * batch_size != batch_beam_size: + raise ValueError( + f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is 
{batch_beam_size}." + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in + # the same group don't produce same tokens everytime. + beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) + beam_scores[:, ::num_sub_beams] = 0 + beam_scores = beam_scores.view((batch_size * num_beams,)) + + this_peer_finished = False + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + # predicted tokens in cur_len step + current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) + + # indices which will form the beams in the next time step + reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) + + # do one decoder step on all beams of all sentences in batch + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + if output_scores: + processed_score = torch.zeros_like(outputs.logits[:, -1, :]) + if output_logits: + raw_logit_score = outputs.logits[:, -1, :] + + for beam_group_idx in range(num_beam_groups): + group_start_idx = beam_group_idx * num_sub_beams + group_end_idx = min(group_start_idx + num_sub_beams, num_beams) + group_size = group_end_idx - group_start_idx + + # indices of beams of current group among all sentences in batch + batch_group_indices = [] + + for batch_idx in range(batch_size): + batch_group_indices.extend( + [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] + ) + group_input_ids = input_ids[batch_group_indices] + + # select outputs of beams of current group only + next_token_logits = outputs.logits[batch_group_indices, -1, :] + + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * group_size, vocab_size) + vocab_size = next_token_scores.shape[-1] + + next_token_scores_processed = logits_processor( + group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx + ) + next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1) + next_token_scores = next_token_scores.expand_as(next_token_scores_processed) + + if output_scores: + processed_score[batch_group_indices] = next_token_scores_processed + + # reshape for beam search + 
next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) + + # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam. + n_eos_tokens = len(eos_token_id) if eos_token_id else 0 + next_token_scores, next_tokens = torch.topk( + next_token_scores, max(2, 1 + n_eos_tokens) * group_size, dim=1, largest=True, sorted=True + ) + + next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor") + next_tokens = next_tokens % vocab_size + + # stateless + process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None + beam_outputs = beam_scorer.process( + group_input_ids, + next_token_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=process_beam_indices, + group_index=beam_group_idx, + decoder_prompt_len=decoder_prompt_len, + ) + beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + if return_dict_in_generate and output_scores: + beam_indices[beam_group_idx] = tuple( + beam_indices[beam_group_idx][beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices[0])) + ) + + input_ids[batch_group_indices] = group_input_ids[beam_idx] + group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + current_tokens[batch_group_indices] = group_input_ids[:, -1] + + # (beam_idx // group_size) -> batch_idx + # (beam_idx % group_size) -> offset of idx inside the group + reordering_indices[batch_group_indices] = ( + num_beams * torch.div(beam_idx, group_size, rounding_mode="floor") + + group_start_idx + + (beam_idx % group_size) + ) + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (processed_score,) + if output_logits: + raw_logits += (raw_logit_score,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + if model_kwargs.get("past_key_values", None) is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], reordering_indices + ) + + # increase cur_len + cur_len = cur_len + 1 + + if beam_scorer.is_done or all(stopping_criteria(input_ids, scores)): + this_peer_finished = True + + final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None + sequence_outputs = beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=final_beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + + if self.config.is_encoder_decoder: + return GenerateBeamEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], 
+ scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateBeamDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def constrained_beam_search(self, *args, **kwargs): + logger.warning_once( + "Calling `constrained_beam_search` directly is deprecated and will be removed in v4.41. Use `generate` or a " + "custom generation loop instead.", + ) + return self._constrained_beam_search(*args, **kwargs) + + def _constrained_beam_search( + self, + input_ids: torch.LongTensor, + constrained_beam_scorer: ConstrainedBeamSearchScorer, + logits_processor: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: Optional[bool] = None, + **model_kwargs, + ) -> Union[GenerateBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **constrained beam search + decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._constrained_beam_search`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + constrained_beam_scorer (`ConstrainedBeamSearchScorer`): + A derived instance of [`BeamScorer`] that defines how beam hypotheses are constructed, stored and + sorted during generation, while satisfying a list of positive constraints. For more information, the + documentation of [`ConstrainedBeamSearchScorer`] should be read. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + max_length (`int`, *optional*, defaults to 20): + **DEPRECATED**. 
Use `logits_processor` or `stopping_criteria` directly to cap the number of generated + tokens. The maximum length of the sequence to be generated. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for + more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + model_kwargs: + Additional model specific kwargs will be forwarded to the `forward` function of the model. If model is + an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateBeamDecoderOnlyOutput`], [`~generation.GenerateBeamEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateBeamDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateBeamEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForSeq2SeqLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... ConstrainedBeamSearchScorer, + ... PhrasalConstraint, + ... ) + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("google-t5/t5-base") + >>> model = AutoModelForSeq2SeqLM.from_pretrained("google-t5/t5-base") + + >>> encoder_input_str = "translate English to German: How old are you?" + >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids + + + >>> # lets run beam search using 3 beams + >>> num_beams = 3 + >>> # define decoder start token ids + >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) + >>> input_ids = input_ids * model.config.decoder_start_token_id + + >>> # add encoder_outputs to model keyword arguments + >>> model_kwargs = { + ... "encoder_outputs": model.get_encoder()( + ... encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True + ... ) + ... } + + >>> constraint_str = "Sie" + >>> constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # slice to remove eos token + >>> constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] + + + >>> # instantiate beam scorer + >>> beam_scorer = ConstrainedBeamSearchScorer( + ... batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints + ... ) + + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... 
MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), + ... ] + ... ) + + >>> outputs = model._constrained_beam_search( + ... input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs + ... ) + + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ['Wie alt sind Sie?'] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + if max_length is not None: + warnings.warn( + "`max_length` is deprecated in this function, use" + " `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.", + UserWarning, + ) + stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length) + if len(stopping_criteria) == 0: + warnings.warn("You don't have defined any stopping_criteria, this will likely loop forever", UserWarning) + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + if eos_token_id is not None: + logger.warning_once( + "`eos_token_id` is deprecated in this function and will be removed in v4.41, use" + " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead." + " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private and beam scorer refactored + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_logits = output_logits if output_logits is not None else self.generation_config.output_logits + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + batch_size = len(constrained_beam_scorer._beam_hyps) + num_beams = constrained_beam_scorer.num_beams + + batch_beam_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) + + if num_beams * batch_size != batch_beam_size: + raise ValueError( + f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." 
+ ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + beam_indices = ( + tuple(() for _ in range(batch_beam_size)) if (return_dict_in_generate and output_scores) else None + ) + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # initialise score of first beam with 0 and the rest with -1e9. This makes sure that only tokens + # of the first beam are considered to avoid sampling the exact same tokens across all beams. + beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) + beam_scores[:, 1:] = -1e9 + beam_scores = beam_scores.view((batch_size * num_beams,)) + + this_peer_finished = False + + decoder_prompt_len = input_ids.shape[-1] # record the prompt length of decoder + while self._has_unfinished_sequences(this_peer_finished, synced_gpus, device=input_ids.device): + model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) + + outputs = self( + **model_inputs, + return_dict=True, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + if synced_gpus and this_peer_finished: + cur_len = cur_len + 1 + continue # don't waste resources running the code we don't need + + next_token_logits = outputs.logits[:, -1, :] + next_token_scores = nn.functional.log_softmax( + next_token_logits, dim=-1 + ) # (batch_size * num_beams, vocab_size) + + next_token_scores_processed = logits_processor(input_ids, next_token_scores) + + next_token_scores = next_token_scores_processed + beam_scores[:, None].expand_as( + next_token_scores_processed + ) + + scores_for_all_vocab = next_token_scores.clone() + + # Store scores, attentions and hidden_states when required + if return_dict_in_generate: + if output_scores: + scores += (next_token_scores,) + if output_logits: + raw_logits += (next_token_logits,) + if output_attentions: + decoder_attentions += ( + (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) + ) + if self.config.is_encoder_decoder: + cross_attentions += (outputs.cross_attentions,) + + if output_hidden_states: + decoder_hidden_states += ( + (outputs.decoder_hidden_states,) + if self.config.is_encoder_decoder + else (outputs.hidden_states,) + ) + + # reshape for beam search + vocab_size = next_token_scores.shape[-1] + next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) + + # Sample 1 + len(eos_token_id) next tokens for each beam so we have at least 1 non eos token per beam. 
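# (Editor-added illustrative note, not part of the upstream diff.) For example, with num_beams=3 and a
# single eos token id, the top-k below keeps max(2, 1 + 1) * 3 = 6 candidates per batch row; even if
# every beam's best continuation were EOS, at least three non-EOS candidates remain, so the scorer can
# always refill all beams.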
+ n_eos_tokens = len(eos_token_id) if eos_token_id else 0 + next_token_scores, next_tokens = torch.topk( + next_token_scores, max(2, 1 + n_eos_tokens) * num_beams, dim=1, largest=True, sorted=True + ) + + next_indices = (next_tokens / vocab_size).long() + next_tokens = next_tokens % vocab_size + + # stateless + beam_outputs = constrained_beam_scorer.process( + input_ids, + next_token_scores, + next_tokens, + next_indices, + scores_for_all_vocab, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + beam_scores = beam_outputs["next_beam_scores"] + beam_next_tokens = beam_outputs["next_beam_tokens"] + beam_idx = beam_outputs["next_beam_indices"] + + input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + if model_kwargs.get("past_key_values", None) is not None: + model_kwargs["past_key_values"] = self._temporary_reorder_cache( + model_kwargs["past_key_values"], beam_idx + ) + + if return_dict_in_generate and output_scores: + beam_indices = tuple((beam_indices[beam_idx[i]] + (beam_idx[i],) for i in range(len(beam_indices)))) + + # increase cur_len + cur_len = cur_len + 1 + + if constrained_beam_scorer.is_done or all(stopping_criteria(input_ids, scores)): + this_peer_finished = True + + sequence_outputs = constrained_beam_scorer.finalize( + input_ids, + beam_scores, + next_tokens, + next_indices, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + max_length=stopping_criteria.max_length, + beam_indices=beam_indices, + decoder_prompt_len=decoder_prompt_len, + ) + + if return_dict_in_generate: + if not output_scores: + sequence_outputs["sequence_scores"] = None + if self.config.is_encoder_decoder: + return GenerateBeamEncoderDecoderOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateBeamDecoderOnlyOutput( + sequences=sequence_outputs["sequences"], + sequences_scores=sequence_outputs["sequence_scores"], + scores=scores, + logits=raw_logits, + beam_indices=sequence_outputs["beam_indices"], + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return sequence_outputs["sequences"] + + def assisted_decoding(self, *args, **kwargs): + logger.warning_once( + "Calling `_assisted_decoding` directly is deprecated and will be removed in v4.41. 
Use `generate` or a " + "custom generation loop instead.", + ) + return self._assisted_decoding(*args, **kwargs) + + def _assisted_decoding( + self, + input_ids: torch.LongTensor, + candidate_generator: Optional["CandidateGenerator"] = None, + do_sample: bool = False, + logits_processor: Optional[LogitsProcessorList] = None, + logits_warper: Optional[LogitsProcessorList] = None, + stopping_criteria: Optional[StoppingCriteriaList] = None, + pad_token_id: Optional[int] = None, + eos_token_id: Optional[Union[int, List[int]]] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + output_scores: Optional[bool] = None, + output_logits: Optional[bool] = None, + return_dict_in_generate: Optional[bool] = None, + synced_gpus: bool = False, + streamer: Optional["BaseStreamer"] = None, + **model_kwargs, + ) -> Union[GenerateNonBeamOutput, torch.LongTensor]: + r""" + Generates sequences of token ids for models with a language modeling head using **greedy decoding** or + **sample** (depending on `do_sample`), assisted by candidate sequences. Assisted generation is an example of a + candidate decoding strategy. Can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text + models. + + + + In most cases, you do not need to call [`~generation.GenerationMixin._assisted_decoding`] directly. Use + generate() instead. For an overview of generation strategies and code examples, check the [following + guide](../generation_strategies). + + + + Parameters: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + The sequence used as a prompt for the generation. + candidate_generator (`CandidateGenerator`, *optional*): + A derived instance of [`CandidateGenerator`] that defines how candidate sequences are generated. For + more information, the documentation of [`CandidateGenerator`] should be read. + do_sample (`bool`, *optional*, defaults to `False`): + Whether or not to use sampling ; use greedy decoding otherwise. + logits_processor (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`] + used to modify the prediction scores of the language modeling head applied at each generation step. + logits_warper (`LogitsProcessorList`, *optional*): + An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsWarper`] used + to warp the prediction score distribution of the language modeling head applied before multinomial + sampling at each generation step. + stopping_criteria (`StoppingCriteriaList`, *optional*): + An instance of [`StoppingCriteriaList`]. List of instances of class derived from [`StoppingCriteria`] + used to tell if the generation loop should stop. + pad_token_id (`int`, *optional*): + The id of the *padding* token. + eos_token_id (`Union[int, List[int]]`, *optional*): + The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. 
+ output_logits (`bool`, *optional*, defaults to `False`): + Whether or not to return the raw prediction logit scores. See `logits` under returned tensors for + more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + synced_gpus (`bool`, *optional*, defaults to `False`): + Whether to continue running the while loop until max_length (needed for ZeRO stage 3) + streamer (`BaseStreamer`, *optional*): + Streamer object that will be used to stream the generated sequences. Generated tokens are passed + through `streamer.put(token_ids)` and the streamer is responsible for any further processing. + model_kwargs: + Additional model specific keyword arguments will be forwarded to the `forward` function of the model. + If model is an encoder-decoder model the kwargs should include `encoder_outputs`. + + Return: + [`~generation.GenerateDecoderOnlyOutput`], [`~generation.GenerateEncoderDecoderOutput`] or + `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a + [`~generation.GenerateDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and + `return_dict_in_generate=True` or a [`~generation.GenerateEncoderDecoderOutput`] if + `model.config.is_encoder_decoder=True`. + + Examples: + + ```python + >>> from transformers import ( + ... AutoTokenizer, + ... AutoModelForCausalLM, + ... LogitsProcessorList, + ... MinLengthLogitsProcessor, + ... StoppingCriteriaList, + ... MaxLengthCriteria, + ... ) + >>> from transformers.generation import AssistedCandidateGenerator + + >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") + >>> model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2") + >>> assistant_model = AutoModelForCausalLM.from_pretrained("distilbert/distilgpt2") + >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token + >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id + >>> input_prompt = "It might be possible to" + >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids + >>> # instantiate logits processors + >>> logits_processor = LogitsProcessorList( + ... [ + ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id), + ... ] + ... ) + >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)]) + >>> candidate_generator = AssistedCandidateGenerator( + ... input_ids=input_ids, + ... assistant_model=assistant_model, + ... generation_config=model.generation_config, + ... logits_processor=logits_processor, + ... model_kwargs={}, + ... ) + >>> outputs = model._assisted_decoding( + ... input_ids, + ... candidate_generator=candidate_generator, + ... logits_processor=logits_processor, + ... stopping_criteria=stopping_criteria, + ... 
) + >>> tokenizer.batch_decode(outputs, skip_special_tokens=True) + ["It might be possible to get a better understanding of the nature of the problem, but it's not"] + ```""" + # init values + logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() + logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() + stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList() + pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id + if eos_token_id is not None: + logger.warning_once( + "`eos_token_id` is deprecated in this function and will be removed in v4.41, use" + " `stopping_criteria=StoppingCriteriaList([EosTokenCriteria(eos_token_id=eos_token_id)])` instead." + " Otherwise make sure to set `model.generation_config.eos_token_id`", + FutureWarning, + ) + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + else: + # TODO remove when the method is totally private and beam scorer refactored + # need to get `eos_token_id` and add stopping criteria, so that generation does not go forever + eos_token_id = [ + criteria.eos_token_id.tolist() for criteria in stopping_criteria if hasattr(criteria, "eos_token_id") + ] + eos_token_id = eos_token_id[0] if eos_token_id else None + if eos_token_id is None and self.generation_config.eos_token_id is not None: + eos_token_id = self.generation_config.eos_token_id + stopping_criteria.append(EosTokenCriteria(eos_token_id=eos_token_id)) + + if isinstance(eos_token_id, int): + eos_token_id = [eos_token_id] + output_scores = output_scores if output_scores is not None else self.generation_config.output_scores + output_logits = output_logits if output_logits is not None else self.generation_config.output_logits + output_attentions = ( + output_attentions if output_attentions is not None else self.generation_config.output_attentions + ) + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states + ) + return_dict_in_generate = ( + return_dict_in_generate + if return_dict_in_generate is not None + else self.generation_config.return_dict_in_generate + ) + + # init attention / hidden states / scores tuples + scores = () if (return_dict_in_generate and output_scores) else None + raw_logits = () if (return_dict_in_generate and output_logits) else None + decoder_attentions = () if (return_dict_in_generate and output_attentions) else None + cross_attentions = () if (return_dict_in_generate and output_attentions) else None + decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None + + # if model is an encoder-decoder, retrieve encoder attention weights and hidden states + if return_dict_in_generate and self.config.is_encoder_decoder: + encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None + encoder_hidden_states = ( + model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None + ) + + # keep track of which sequences are already finished + batch_size, cur_len = input_ids.shape + if "inputs_embeds" in model_kwargs: + cur_len = model_kwargs["inputs_embeds"].shape[1] + unfinished_sequences = torch.ones(batch_size, dtype=torch.long, device=input_ids.device) + model_kwargs["cache_position"] = torch.arange(cur_len, device=input_ids.device) + + this_peer_finished = False + while self._has_unfinished_sequences(this_peer_finished, 
synced_gpus, device=input_ids.device): + cur_len = input_ids.shape[-1] + + # 1. Fetch candidate sequences from a `CandidateGenerator` + candidate_input_ids, candidate_logits = candidate_generator.get_candidates(input_ids) + candidate_input_ids = candidate_input_ids.to(self.device) + if candidate_logits is not None: + candidate_logits = candidate_logits.to(self.device) + + candidate_length = candidate_input_ids.shape[1] - input_ids.shape[1] + is_done_candidate = stopping_criteria(candidate_input_ids, None) + + # 2. Use the original model to obtain the next token logits given the candidate sequence. We obtain + # `candidate_length + 1` relevant logits from this process: in the event that all candidates are correct, + # we use this forward pass to also pick the subsequent logits in the original model. + + # 2.1. Prepare the model inputs + model_kwargs = _prepare_attention_mask( + model_kwargs, candidate_input_ids.shape[1], self.config.is_encoder_decoder + ) + model_kwargs = _prepare_token_type_ids(model_kwargs, candidate_input_ids.shape[1]) + if "cache_position" in model_kwargs: + model_kwargs["cache_position"] = torch.cat( + ( + model_kwargs["cache_position"], + torch.arange(cur_len, cur_len + candidate_length, device=input_ids.device, dtype=torch.long), + ), + dim=0, + ) + + model_inputs = self.prepare_inputs_for_generation(candidate_input_ids, **model_kwargs) + if "num_logits_to_keep" in model_inputs: + model_inputs["num_logits_to_keep"] = candidate_length + 1 + + # 2.2. Run a forward pass on the candidate sequence + outputs = self( + **model_inputs, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + ) + + # 2.3. Process the new logits + new_logits = outputs.logits[:, -candidate_length - 1 :] # excludes the input prompt if present + next_token_logits = new_logits.clone() + if len(logits_processor) > 0: + for i in range(candidate_length + 1): + new_logits[:, i, :] = logits_processor(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :]) + if len(logits_warper) > 0: + for i in range(candidate_length + 1): + new_logits[:, i, :] = logits_warper(candidate_input_ids[:, : cur_len + i], new_logits[:, i, :]) + + # 3. Select the accepted tokens. There are two possible cases: + # Case 1: `do_sample=True` and we have logits for the candidates (originally from speculative decoding) + # 👉 Apply algorithm 1 from the speculative decoding paper (https://arxiv.org/pdf/2211.17192.pdf). + if do_sample and candidate_logits is not None: + valid_tokens, n_matches = _speculative_sampling( + candidate_input_ids, + candidate_logits, + candidate_length, + new_logits, + is_done_candidate, + ) + + # Case 2: all other cases (originally from assisted generation) 👉 Compare the tokens selected from the + # original model logits with the candidate tokens. We can keep the candidate tokens until the first + # mismatch, or until the max length is reached. + else: + if do_sample: + probs = new_logits.softmax(dim=-1) + selected_tokens = torch.multinomial(probs[0, :, :], num_samples=1).squeeze(1)[None, :] + else: + selected_tokens = new_logits.argmax(dim=-1) + + candidate_new_tokens = candidate_input_ids[:, cur_len:] + n_matches = ((~(candidate_new_tokens == selected_tokens[:, :-1])).cumsum(dim=-1) < 1).sum() + + # Ensure we don't generate beyond max_len or an EOS token + if is_done_candidate and n_matches == candidate_length: + n_matches -= 1 + valid_tokens = selected_tokens[:, : n_matches + 1] + + # 4. Update variables according to the number of matching assistant tokens. 
Remember: the token generated + # by the model after the last candidate match is also valid, as it is generated from a correct sequence. + # Because of this last token, assisted generation search reduces to a normal greedy search/sample if there + # is no match. + + # 4.1. Get the valid continuation, after the matching tokens + input_ids = torch.cat((input_ids, valid_tokens), dim=-1) + if streamer is not None: + streamer.put(valid_tokens.cpu()) + new_cur_len = input_ids.shape[-1] + + # 4.2. Discard past key values relative to unused assistant tokens + new_cache_size = new_cur_len - 1 + outputs.past_key_values = _crop_past_key_values(self, outputs.past_key_values, new_cache_size) + + # 5. Update the candidate generation strategy if needed + candidate_generator.update_candidate_strategy(input_ids, new_logits, n_matches) + + if synced_gpus and this_peer_finished: + continue # don't waste resources running the code we don't need + + # Store scores, attentions and hidden_states when required + # Assistant: modified to append one tuple element per token, as in the other generation methods. + if return_dict_in_generate: + if output_scores: + scores += tuple(new_logits[:, i, :] for i in range(n_matches + 1)) + if output_logits: + raw_logits += (next_token_logits,) + + if "past_key_values" not in model_kwargs: + added_len = new_cur_len + else: + added_len = n_matches + 1 + + if output_attentions: + if self.config.is_encoder_decoder: + cross_attentions = _split_model_outputs( + cross_attentions, outputs.cross_attentions, cur_len, added_len + ) + decoder_attentions = _split_model_outputs( + decoder_attentions, + outputs.decoder_attentions, + cur_len, + added_len, + is_decoder_attention=True, + ) + else: + decoder_attentions = _split_model_outputs( + decoder_attentions, + outputs.attentions, + cur_len, + added_len, + is_decoder_attention=True, + ) + if output_hidden_states: + if self.config.is_encoder_decoder: + decoder_hidden_states = _split_model_outputs( + decoder_hidden_states, outputs.decoder_hidden_states, cur_len, added_len + ) + else: + decoder_hidden_states = _split_model_outputs( + decoder_hidden_states, outputs.hidden_states, cur_len, added_len + ) + + model_kwargs = self._update_model_kwargs_for_generation( + outputs, + model_kwargs, + is_encoder_decoder=self.config.is_encoder_decoder, + ) + + unfinished_sequences = unfinished_sequences & ~stopping_criteria(input_ids, scores) + this_peer_finished = unfinished_sequences.max() == 0 + + if streamer is not None: + streamer.end() + + if ( + hasattr(candidate_generator, "assistant_model") + and candidate_generator.assistant_model.generation_config.num_assistant_tokens_schedule == "heuristic" + ): + candidate_generator.assistant_model.generation_config.num_assistant_tokens = ( + candidate_generator.num_assistant_tokens + ) + if return_dict_in_generate: + if self.config.is_encoder_decoder: + return GenerateEncoderDecoderOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + encoder_attentions=encoder_attentions, + encoder_hidden_states=encoder_hidden_states, + decoder_attentions=decoder_attentions, + cross_attentions=cross_attentions, + decoder_hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return GenerateDecoderOnlyOutput( + sequences=input_ids, + scores=scores, + logits=raw_logits, + attentions=decoder_attentions, + hidden_states=decoder_hidden_states, + past_key_values=model_kwargs.get("past_key_values"), + ) + else: + return input_ids + + +def _speculative_sampling( + 
candidate_input_ids, + candidate_logits, + candidate_length, + new_logits, + is_done_candidate, +): + """ + Applies sampling as in the speculative decoding paper (https://arxiv.org/pdf/2211.17192.pdf, algorithm 1). Returns + the selected tokens, as well as the number of candidate matches. + + NOTE: Unless otherwise stated, the variable names match those in the paper. + """ + new_candidate_input_ids = candidate_input_ids[:, -candidate_length:] + # Gets the probabilities from the logits. q_i and p_i denote the assistant and model probabilities of the tokens + # selected by the assistant, respectively. + q = candidate_logits.softmax(dim=-1) + q_i = q[:, torch.arange(candidate_length), new_candidate_input_ids].squeeze(0, 1) + p = new_logits.softmax(dim=-1) + p_i = p[:, torch.arange(candidate_length), new_candidate_input_ids].squeeze(0, 1) + probability_ratio = p_i / q_i + + # When probability_ratio > 1 (i.e. q_i(x) < p_i(x), or "assistant probability of the candidate token is smaller + # than the model probability for the same token"), keep the token. Otherwise reject with p = 1 - probability_ratio + # (= keep with p = probability_ratio). Keep all the tokens until the first rejection + r_i = torch.rand_like(probability_ratio) + is_accepted = r_i <= probability_ratio + n_matches = ((~is_accepted).cumsum(dim=-1) < 1).sum() # this is `n` in algorithm 1 + + # Ensure we don't generate beyond max_len or an EOS token (not in algorithm 1, but needed for correct behavior) + if is_done_candidate and n_matches == candidate_length: + # Output length is assumed to be `n_matches + 1`. Since we won't generate another token with the target model + # due to acceptance on EOS we fix `n_matches` + n_matches -= 1 + valid_tokens = new_candidate_input_ids[:, : n_matches + 1] + else: + # Next token selection: if there is a rejection, adjust the distribution from the main model before sampling. + gamma = candidate_logits.shape[1] + p_n_plus_1 = p[:, n_matches, :] + if n_matches < gamma: + q_n_plus_1 = q[:, n_matches, :] + p_prime = torch.clamp((p_n_plus_1 - q_n_plus_1), min=0) + p_prime.div_(p_prime.sum()) + else: + p_prime = p_n_plus_1 + t = torch.multinomial(p_prime, num_samples=1).squeeze(1)[None, :] + + # The selected tokens include the matches (if any) plus the next sampled tokens + if n_matches > 0: + valid_tokens = torch.cat((new_candidate_input_ids[:, :n_matches], t), dim=-1) + else: + valid_tokens = t + + return valid_tokens, n_matches + + +def _split_model_outputs(outputs, new_outputs, cur_len, added_len, is_decoder_attention=False): + """ + Given the (decoder/cross attentions)/(decoder hidden states) for multiple generated tokens, splits it into a tuple + where each member corresponds to a single generated token. + """ + # Retrocompatibility: in our generation functions, the first iteration includes the attention/hidden states for the + # prompt. 
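# (Editor-added illustrative note, not part of the upstream diff.) Assuming `new_outputs` holds one
# tensor per layer whose generated-token axis covers `added_len` positions (e.g. decoder attentions of
# shape [batch, heads, added_len, kv_len]), the code below peels off one position at a time with slices
# such as layer[..., i : i + 1, :], so the caller receives a tuple with one entry per generated token,
# mirroring the layout produced by the non-assisted decoding loops.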
+ if len(outputs) == 0: + new_tuple = () + for layer in new_outputs: + last_dim_size = cur_len if is_decoder_attention else layer.shape[-1] + new_tuple += (layer[..., :cur_len, :last_dim_size],) + outputs += (new_tuple,) + # The first iteration contains the prompt + 1 generated token, let's update the length variables accordingly + cur_len += 1 + added_len -= cur_len + + for i in range(added_len): + new_tuple = () + for layer in new_outputs: + last_dim_size = cur_len + i if is_decoder_attention else layer.shape[-1] + new_tuple += (layer[..., i : i + 1, :last_dim_size],) + outputs += (new_tuple,) + return outputs + + +def _ranking_fast( + context_hidden: torch.FloatTensor, + next_hidden: torch.FloatTensor, + next_top_k_probs: torch.FloatTensor, + alpha: float, + beam_width: int, +) -> torch.FloatTensor: + """ + Reranks the top_k candidates based on a degeneration penalty (cosine similarity with previous tokens), as described + in the paper "A Contrastive Framework for Neural Text Generation". Returns the index of the best candidate for each + row in the batch. + """ + norm_context_hidden = context_hidden / context_hidden.norm(dim=2, keepdim=True) + norm_next_hidden = next_hidden / next_hidden.norm(dim=2, keepdim=True) + cosine_matrix = torch.matmul(norm_context_hidden, norm_next_hidden.transpose(1, 2)).squeeze(-1) # [B*K, S] + degeneration_penalty, _ = torch.max(cosine_matrix, dim=-1) # [B*K] + next_top_k_probs = next_top_k_probs.view(-1) # [B*K] + contrastive_score = (1.0 - alpha) * next_top_k_probs - alpha * degeneration_penalty + contrastive_score = torch.stack(torch.split(contrastive_score, beam_width)) # [B, K] + _, selected_idx = contrastive_score.max(dim=-1) # [B] + return selected_idx + + +def _split(data, full_batch_size: int, split_size: int = None): + """ + Takes care of three cases: + 1. data is a tensor: e.g. last_hidden_state, pooler_output etc. split them on the batch_size dim + 2. data is a tuple: e.g. hidden_states, attentions etc. Keep the tuple as it is and split each tensor in it and + return a list of tuples + 3. data is a tuple of tuples, e.g. past_key_values. Keep the tuple as it is and split each tuple in it and + return a list of tuples of tuples + (see documentation of ModelOutput) + """ + if data is None: + return [None] * (full_batch_size // split_size) + if isinstance(data, torch.Tensor): + return [data[i : i + split_size] for i in range(0, full_batch_size, split_size)] + elif isinstance(data, tuple): + # If the elements of the tuple are also tuples (e.g., past_key_values in our earlier example) + if isinstance(data[0], tuple): + return [ + tuple(tuple(tensor[i : i + split_size] for tensor in inner_tuple) for inner_tuple in data) + for i in range(0, full_batch_size, split_size) + ] + + else: + return [ + tuple(sub_tensor[i : i + split_size] for sub_tensor in data) + for i in range(0, full_batch_size, split_size) + ] + else: + raise ValueError(f"Unexpected attribute type: {type(data)}") + + +def _split_model_inputs( + model_input: Union[ModelOutput, Dict], split_size: int, full_batch_size: int +) -> List[Union[ModelOutput, Dict]]: + """ + Split a ModelOutput object (or its subclasses) or Dict into a list of same-class objects based on a specified split + size. The input object is dict when it was prepared for forward pass and ModelOutput when it was returned from + previous forward pass. 
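A toy re-run of the `_ranking_fast` scoring above on invented tensors, to make the shapes and the `(1 - alpha) * prob - alpha * max_cosine` trade-off concrete (all sizes here are made up):

```python
import torch

batch, k, seq, hidden = 1, 3, 5, 8          # 1 row, top-3 candidates, 5 context tokens
context_hidden = torch.randn(batch * k, seq, hidden)
next_hidden = torch.randn(batch * k, 1, hidden)
next_top_k_probs = torch.rand(batch, k)
alpha = 0.6

norm_ctx = context_hidden / context_hidden.norm(dim=2, keepdim=True)
norm_next = next_hidden / next_hidden.norm(dim=2, keepdim=True)
cosine = torch.matmul(norm_ctx, norm_next.transpose(1, 2)).squeeze(-1)  # [B*K, S]
degeneration_penalty, _ = cosine.max(dim=-1)                            # [B*K]
score = (1.0 - alpha) * next_top_k_probs.view(-1) - alpha * degeneration_penalty
best_idx = torch.stack(torch.split(score, k)).argmax(dim=-1)            # best candidate per row
print(best_idx)
```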
+ """ + # Edge case: if model_input is None, return a list of Nones + # this happens with Whisper where encoder_outputs is None + if model_input is None: + return [model_input] * (full_batch_size // split_size) + # Infer the class from the object + model_output_cls = type(model_input) + if (full_batch_size % split_size) != 0: + raise ValueError("`full_batch_size` must be divisible by `split_size`") + + if split_size > full_batch_size: + raise ValueError("`split_size` must be smaller or equal to `full_batch_size`") + + # Helper function to split tensors or tuples of tensors + + # Find all the dataclass fields (e.g., last_hidden_state, pooler_output etc.) and split them + keys = ( + model_input.__dataclass_fields__.keys() if hasattr(model_input, "__dataclass_fields__") else model_input.keys() + ) + # We only keep keys that are in the model_input + keys = [k for k in keys if k in model_input] + # Here we can have four types of values: tensors, tuples of tensors and booleans, and encoder_outputs which is a + # ModelOutput object. + # bool should not be split but replicated for each split + bool_keys = [k for k in keys if isinstance(model_input[k], bool) or k == "cache_position"] + keys_to_ignore = ["cache_position", "encoder_outputs", "num_logits_to_keep"] + non_bool_keys = [k for k in keys if not isinstance(model_input[k], bool) and k not in keys_to_ignore] + + # we split the tensors and tuples of tensors + data_split_list = [ + {k: _split(model_input[k], full_batch_size, split_size)[i] for k in non_bool_keys} + for i in range(full_batch_size // split_size) + ] + # bool values are the same and replicated for each split + bool_data = {k: model_input[k] for k in bool_keys} + # encoder_outputs is a ModelOutput object and should be split by its own + if "encoder_outputs" in model_input: + encoder_outputs_split = _split_model_inputs(model_input["encoder_outputs"], split_size, full_batch_size) + data_split_list = [ + {**data_split, "encoder_outputs": encoder_outputs_split[i]} for i, data_split in enumerate(data_split_list) + ] + # num_logits_to_keep should be replicated for each split, similar to bool values + if "num_logits_to_keep" in model_input: + data_split_list = [ + {**data_split, "num_logits_to_keep": model_input["num_logits_to_keep"]} for data_split in data_split_list + ] + + # Convert each dictionary in the list to an object of the inferred class + split_model_inputs: List[Union[ModelOutput, Dict]] = [ + model_output_cls(**data_split, **bool_data) for data_split in data_split_list + ] + + return split_model_inputs + + +def stack_model_outputs(model_outputs: List[ModelOutput]) -> ModelOutput: + """ + Stack a list of ModelOutput objects (or its subclasses) along the batch_size dimension. The function infers the + specific ModelOutput subclass from the list provided. + """ + if not model_outputs: + raise ValueError("Input list is empty.") + + # Infer the class from the first object in the list + model_output_cls = type(model_outputs[0]) + + # Ensure all objects are of the same type + if not all(isinstance(obj, model_output_cls) for obj in model_outputs): + raise ValueError("All elements in the list should be of the same type.") + + # Helper function to concat tensors or tuples of tensors + def _concat(data): + """ + Reverse of `_split` function above. 
+ """ + if any(data is None for data in data): + return None + if isinstance(data[0], torch.Tensor): + return torch.cat(data, dim=0) + elif isinstance(data[0], tuple): + # If the elements of the tuple are also tuples (e.g., past_key_values in our earlier example) + if isinstance(data[0][0], tuple): + return tuple( + tuple(torch.cat([attr[i][j] for attr in data], dim=0) for j in range(len(data[0][0]))) + for i in range(len(data[0])) + ) + else: + return tuple(torch.cat([attr[i] for attr in data], dim=0) for i in range(len(data[0]))) + elif isinstance(data[0], (int, float)): + # If the elements are integers or floats, return a tensor + return torch.tensor(data) + else: + raise ValueError(f"Unexpected attribute type: {type(data[0])}") + + # Use a dictionary comprehension to gather attributes from all objects and concatenate them + concatenated_data = { + k: _concat([getattr(model_output, k) for model_output in model_outputs]) + for k in model_output_cls.__dataclass_fields__.keys() + } + + # Return a new object of the inferred class with the concatenated attributes + return model_output_cls(**concatenated_data) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__init__.py b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..33350c83a2c161ee228677e8f6fc4b495e9c05bb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__init__.py @@ -0,0 +1,49 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import TYPE_CHECKING + +from ..utils import _LazyModule + + +_import_structure = { + "config": [ + "EXTERNAL_DATA_FORMAT_SIZE_LIMIT", + "OnnxConfig", + "OnnxConfigWithPast", + "OnnxSeq2SeqConfigWithPast", + "PatchingSpec", + ], + "convert": ["export", "validate_model_outputs"], + "features": ["FeaturesManager"], + "utils": ["ParameterFormat", "compute_serialized_parameters_size"], +} + + +if TYPE_CHECKING: + from .config import ( + EXTERNAL_DATA_FORMAT_SIZE_LIMIT, + OnnxConfig, + OnnxConfigWithPast, + OnnxSeq2SeqConfigWithPast, + PatchingSpec, + ) + from .convert import export, validate_model_outputs + from .features import FeaturesManager + from .utils import ParameterFormat, compute_serialized_parameters_size + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__main__.py b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..e3dc6dfb78aa16fa32ea4a30eb4933a0653c7264 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__main__.py @@ -0,0 +1,242 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import subprocess +import sys +import warnings +from argparse import ArgumentParser +from pathlib import Path + +from packaging import version + +from .. import AutoFeatureExtractor, AutoImageProcessor, AutoProcessor, AutoTokenizer +from ..utils import logging +from ..utils.import_utils import is_optimum_available +from .convert import export, validate_model_outputs +from .features import FeaturesManager +from .utils import get_preprocessor + + +MIN_OPTIMUM_VERSION = "1.5.0" + +ENCODER_DECODER_MODELS = ["vision-encoder-decoder"] + + +def export_with_optimum(args): + if is_optimum_available(): + from optimum.version import __version__ as optimum_version + + parsed_optimum_version = version.parse(optimum_version) + if parsed_optimum_version < version.parse(MIN_OPTIMUM_VERSION): + raise RuntimeError( + f"transformers.onnx requires optimum >= {MIN_OPTIMUM_VERSION} but {optimum_version} is installed. You " + "can upgrade optimum by running: pip install -U optimum[exporters]" + ) + else: + raise RuntimeError( + "transformers.onnx requires optimum to run, you can install the library by running: pip install " + "optimum[exporters]" + ) + cmd_line = [ + sys.executable, + "-m", + "optimum.exporters.onnx", + f"--model {args.model}", + f"--task {args.feature}", + f"--framework {args.framework}" if args.framework is not None else "", + f"{args.output}", + ] + proc = subprocess.Popen(cmd_line, stdout=subprocess.PIPE) + proc.wait() + + logger.info( + "The export was done by optimum.exporters.onnx. We recommend using to use this package directly in future, as " + "transformers.onnx is deprecated, and will be removed in v5. You can find more information here: " + "https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model." + ) + + +def export_with_transformers(args): + args.output = args.output if args.output.is_file() else args.output.joinpath("model.onnx") + if not args.output.parent.exists(): + args.output.parent.mkdir(parents=True) + + # Allocate the model + model = FeaturesManager.get_model_from_feature( + args.feature, args.model, framework=args.framework, cache_dir=args.cache_dir + ) + + model_kind, model_onnx_config = FeaturesManager.check_supported_model_or_raise(model, feature=args.feature) + onnx_config = model_onnx_config(model.config) + + if model_kind in ENCODER_DECODER_MODELS: + encoder_model = model.get_encoder() + decoder_model = model.get_decoder() + + encoder_onnx_config = onnx_config.get_encoder_config(encoder_model.config) + decoder_onnx_config = onnx_config.get_decoder_config( + encoder_model.config, decoder_model.config, feature=args.feature + ) + + if args.opset is None: + args.opset = max(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset) + + if args.opset < min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset): + raise ValueError( + f"Opset {args.opset} is not sufficient to export {model_kind}. 
At least " + f" {min(encoder_onnx_config.default_onnx_opset, decoder_onnx_config.default_onnx_opset)} is required." + ) + + preprocessor = AutoFeatureExtractor.from_pretrained(args.model) + + onnx_inputs, onnx_outputs = export( + preprocessor, + encoder_model, + encoder_onnx_config, + args.opset, + args.output.parent.joinpath("encoder_model.onnx"), + ) + + validate_model_outputs( + encoder_onnx_config, + preprocessor, + encoder_model, + args.output.parent.joinpath("encoder_model.onnx"), + onnx_outputs, + args.atol if args.atol else encoder_onnx_config.atol_for_validation, + ) + + preprocessor = AutoTokenizer.from_pretrained(args.model) + + onnx_inputs, onnx_outputs = export( + preprocessor, + decoder_model, + decoder_onnx_config, + args.opset, + args.output.parent.joinpath("decoder_model.onnx"), + ) + + validate_model_outputs( + decoder_onnx_config, + preprocessor, + decoder_model, + args.output.parent.joinpath("decoder_model.onnx"), + onnx_outputs, + args.atol if args.atol else decoder_onnx_config.atol_for_validation, + ) + logger.info( + f"All good, model saved at: {args.output.parent.joinpath('encoder_model.onnx').as_posix()}," + f" {args.output.parent.joinpath('decoder_model.onnx').as_posix()}" + ) + + else: + # Instantiate the appropriate preprocessor + if args.preprocessor == "auto": + preprocessor = get_preprocessor(args.model) + elif args.preprocessor == "tokenizer": + preprocessor = AutoTokenizer.from_pretrained(args.model) + elif args.preprocessor == "image_processor": + preprocessor = AutoImageProcessor.from_pretrained(args.model) + elif args.preprocessor == "feature_extractor": + preprocessor = AutoFeatureExtractor.from_pretrained(args.model) + elif args.preprocessor == "processor": + preprocessor = AutoProcessor.from_pretrained(args.model) + else: + raise ValueError(f"Unknown preprocessor type '{args.preprocessor}'") + + # Ensure the requested opset is sufficient + if args.opset is None: + args.opset = onnx_config.default_onnx_opset + + if args.opset < onnx_config.default_onnx_opset: + raise ValueError( + f"Opset {args.opset} is not sufficient to export {model_kind}. " + f"At least {onnx_config.default_onnx_opset} is required." + ) + + onnx_inputs, onnx_outputs = export( + preprocessor, + model, + onnx_config, + args.opset, + args.output, + ) + + if args.atol is None: + args.atol = onnx_config.atol_for_validation + + validate_model_outputs(onnx_config, preprocessor, model, args.output, onnx_outputs, args.atol) + logger.info(f"All good, model saved at: {args.output.as_posix()}") + warnings.warn( + "The export was done by transformers.onnx which is deprecated and will be removed in v5. We recommend" + " using optimum.exporters.onnx in future. You can find more information here:" + " https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model.", + FutureWarning, + ) + + +def main(): + parser = ArgumentParser("Hugging Face Transformers ONNX exporter") + parser.add_argument( + "-m", "--model", type=str, required=True, help="Model ID on huggingface.co or path on disk to load model from." + ) + parser.add_argument( + "--feature", + default="default", + help="The type of features to export the model with.", + ) + parser.add_argument("--opset", type=int, default=None, help="ONNX opset version to export the model with.") + parser.add_argument( + "--atol", type=float, default=None, help="Absolute difference tolerance when validating the model." 
+ ) + parser.add_argument( + "--framework", + type=str, + choices=["pt", "tf"], + default=None, + help=( + "The framework to use for the ONNX export." + " If not provided, will attempt to use the local checkpoint's original framework" + " or what is available in the environment." + ), + ) + parser.add_argument("output", type=Path, help="Path indicating where to store generated ONNX model.") + parser.add_argument("--cache_dir", type=str, default=None, help="Path indicating where to store cache.") + parser.add_argument( + "--preprocessor", + type=str, + choices=["auto", "tokenizer", "feature_extractor", "image_processor", "processor"], + default="auto", + help="Which type of preprocessor to use. 'auto' tries to automatically detect it.", + ) + parser.add_argument( + "--export_with_transformers", + action="store_true", + help=( + "Whether to use transformers.onnx instead of optimum.exporters.onnx to perform the ONNX export. It can be " + "useful when exporting a model supported in transformers but not in optimum, otherwise it is not " + "recommended." + ), + ) + + args = parser.parse_args() + if args.export_with_transformers or not is_optimum_available(): + export_with_transformers(args) + else: + export_with_optimum(args) + + +if __name__ == "__main__": + logger = logging.get_logger("transformers.onnx") # pylint: disable=invalid-name + logger.setLevel(logging.INFO) + main() diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8614b9d79e2e8b579bd43cc54d89159a36b9419 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42646885e5200eba255c3bb900717ebe604fb4c5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/__main__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3ae3cb7980a0516d7f138a3966c59e86deeb5a6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/config.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2d025431cf5963db41dec29470eeee1f919a99ea Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/convert.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0eaa668b7bf926c95ec7f47f964fa91b48378c35 Binary files /dev/null and 
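The `__main__` module above backs the `python -m transformers.onnx` command line. A hedged sketch of invoking it from Python, mirroring the same subprocess pattern the module itself uses to call `optimum.exporters.onnx` (the checkpoint and output directory are placeholders; when optimum is installed the command is forwarded to `optimum.exporters.onnx` unless `--export_with_transformers` is passed):

```python
import subprocess
import sys

# Equivalent to: python -m transformers.onnx --model=distilbert-base-uncased onnx_out/
subprocess.run(
    [sys.executable, "-m", "transformers.onnx", "--model=distilbert-base-uncased", "--feature=default", "onnx_out/"],
    check=True,
)
```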
b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/features.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d84f0bfedc06a974105916295baae76cdc1f93b0 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/__pycache__/utils.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/config.py b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/config.py new file mode 100644 index 0000000000000000000000000000000000000000..02bf2421f4d2f6dde0c9595b030dfcb9f82031f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/config.py @@ -0,0 +1,741 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import copy +import dataclasses +import warnings +from abc import ABC, abstractmethod +from collections import OrderedDict +from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Mapping, Optional, Tuple, Union + +import numpy as np +from packaging import version + +from ..utils import TensorType, is_torch_available, is_vision_available, logging +from .utils import ParameterFormat, compute_effective_axis_dimension, compute_serialized_parameters_size + + +if TYPE_CHECKING: + from ..configuration_utils import PretrainedConfig + from ..feature_extraction_utils import FeatureExtractionMixin + from ..image_processing_utils import ImageProcessingMixin + from ..tokenization_utils_base import PreTrainedTokenizerBase + + +if is_vision_available(): + from PIL import Image + +logger = logging.get_logger(__name__) + + +DEFAULT_ONNX_OPSET = 11 + +# 2 Gb +EXTERNAL_DATA_FORMAT_SIZE_LIMIT = 2 * 1024 * 1024 * 1024 + + +@dataclasses.dataclass +class PatchingSpec: + """ + Data class that holds patching specifications. + + Args: + o: Module / object where the op to patch is located + name: Name of the op to monkey patch + custom_op: Custom op that patches the original op + orig_op: Original op that is being patched + op_wrapper: Wrapper (optional) that wraps both the original and custom ops. + It is useful for ops that are class or static methods for instance. + """ + + o: Any + name: str + custom_op: Callable + orig_op: Optional[Callable] = None + op_wrapper: Optional[Callable] = None + + +class OnnxConfig(ABC): + """ + Base class for ONNX exportable model describing metadata on how to export the model through the ONNX format. 
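`OnnxConfig` is the base class a model provides to become ONNX-exportable: subclasses mainly declare the named inputs and their dynamic axes, and may override defaults such as the opset or the validation tolerance. A minimal sketch of such a subclass for a BERT-like encoder (the checkpoint name is a placeholder):

```python
from collections import OrderedDict
from typing import Mapping

from transformers import AutoConfig
from transformers.onnx import OnnxConfig


class MyEncoderOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # dynamic axes: axis 0 is the batch, axis 1 the sequence length
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )


config = AutoConfig.from_pretrained("distilbert-base-uncased")
onnx_config = MyEncoderOnnxConfig(config, task="default")
print(onnx_config.outputs)             # last_hidden_state with dynamic batch/sequence axes
print(onnx_config.default_onnx_opset)  # 11
```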
+ """ + + default_fixed_batch = 2 + default_fixed_sequence = 8 + default_fixed_num_choices = 4 + torch_onnx_minimum_version = version.parse("1.8") + _tasks_to_common_outputs = { + "causal-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "default": OrderedDict({"last_hidden_state": {0: "batch", 1: "sequence"}}), + "image-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "image-segmentation": OrderedDict( + { + "logits": {0: "batch", 1: "sequence"}, + "pred_boxes": {0: "batch", 1: "sequence"}, + "pred_masks": {0: "batch", 1: "sequence"}, + } + ), + "masked-im": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "masked-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "multiple-choice": OrderedDict({"logits": {0: "batch"}}), + "object-detection": OrderedDict( + { + "logits": {0: "batch", 1: "sequence"}, + "pred_boxes": {0: "batch", 1: "sequence"}, + } + ), + "question-answering": OrderedDict( + { + "start_logits": {0: "batch", 1: "sequence"}, + "end_logits": {0: "batch", 1: "sequence"}, + } + ), + "semantic-segmentation": OrderedDict({"logits": {0: "batch", 1: "num_labels", 2: "height", 3: "width"}}), + "seq2seq-lm": OrderedDict({"logits": {0: "batch", 1: "decoder_sequence"}}), + "sequence-classification": OrderedDict({"logits": {0: "batch"}}), + "token-classification": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "vision2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + "speech2seq-lm": OrderedDict({"logits": {0: "batch", 1: "sequence"}}), + } + + def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: List[PatchingSpec] = None): + self._config = config + + if task not in self._tasks_to_common_outputs: + raise ValueError( + f"{task} is not a supported task, supported tasks: {self._tasks_to_common_outputs.keys()}" + ) + self.task = task + + self._patching_specs = [] + for spec in patching_specs if patching_specs is not None else []: + final_spec = spec + if spec.orig_op is None: + final_spec = dataclasses.replace(spec, orig_op=getattr(spec.o, spec.name)) + self._patching_specs.append(final_spec) + + @classmethod + def from_model_config(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfig": + """ + Instantiate a OnnxConfig for a specific model + + Args: + config: The model's configuration to use when exporting to ONNX + + Returns: + OnnxConfig for this model + """ + return cls(config, task=task) + + @property + @abstractmethod + def inputs(self) -> Mapping[str, Mapping[int, str]]: + """ + Mapping containing the axis definition of the input tensors to provide to the model + + Returns: + For each input: its name associated to the axes symbolic name and the axis position within the tensor + """ + raise NotImplementedError() + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + """ + Mapping containing the axis definition of the output tensors to provide to the model + + Returns: + For each output: its name associated to the axes symbolic name and the axis position within the tensor + """ + common_outputs = self._tasks_to_common_outputs[self.task] + return copy.deepcopy(common_outputs) + + @property + def values_override(self) -> Optional[Mapping[str, Any]]: + """ + Dictionary of keys to override in the model's config before exporting + + Returns: + Dictionary with the keys (and their corresponding values) to override + """ + if hasattr(self._config, "use_cache"): + return {"use_cache": False} + + return None + + @property + def default_batch_size(self) -> 
int: + """ + The default batch size to use if no other indication + + Returns: + Integer > 0 + """ + # Using 2 avoid ONNX making assumption about single sample batch + return OnnxConfig.default_fixed_batch + + @property + def default_sequence_length(self) -> int: + """ + The default sequence length to use if no other indication + + Returns: + Integer > 0 + """ + return OnnxConfig.default_fixed_sequence + + @property + def default_num_choices(self) -> int: + """ + The default number of choices to use if no other indication + + Returns: + Integer > 0 + """ + return OnnxConfig.default_fixed_num_choices + + @property + def default_onnx_opset(self) -> int: + """ + Which onnx opset to use when exporting the model + + Returns: + Integer ONNX Opset version + """ + return DEFAULT_ONNX_OPSET + + @property + def atol_for_validation(self) -> float: + """ + What absolute tolerance value to use during model conversion validation. + + Returns: + Float absolute tolerance value. + """ + return 1e-5 + + @property + def is_torch_support_available(self) -> bool: + """ + The minimum PyTorch version required to export the model. + + Returns: + `bool`: Whether the installed version of PyTorch is compatible with the model. + """ + if is_torch_available(): + from transformers.utils import get_torch_version + + return version.parse(get_torch_version()) >= self.torch_onnx_minimum_version + else: + return False + + @staticmethod + def use_external_data_format(num_parameters: int) -> bool: + """ + Flag indicating if the model requires using external data format + + Args: + num_parameters: Number of parameter on the model + + Returns: + True if model.num_parameters() * size_of(float32) >= 2Gb False otherwise + """ + + return ( + compute_serialized_parameters_size(num_parameters, ParameterFormat.Float) + >= EXTERNAL_DATA_FORMAT_SIZE_LIMIT + ) + + def _generate_dummy_images( + self, batch_size: int = 2, num_channels: int = 3, image_height: int = 40, image_width: int = 40 + ): + images = [] + for _ in range(batch_size): + data = np.random.rand(image_height, image_width, num_channels) * 255 + images.append(Image.fromarray(data.astype("uint8")).convert("RGB")) + return images + + def _generate_dummy_audio( + self, batch_size: int = 2, sampling_rate: int = 22050, time_duration: float = 5.0, frequency: int = 220 + ): + audio_data = [] + for _ in range(batch_size): + # time variable + t = np.linspace(0, time_duration, int(time_duration * sampling_rate), endpoint=False) + + # generate pure sine wave at `frequency` Hz + audio_data.append(0.5 * np.sin(2 * np.pi * frequency * t)) + + return audio_data + + def generate_dummy_inputs( + self, + preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin", "ImageProcessingMixin"], + batch_size: int = -1, + seq_length: int = -1, + num_choices: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + num_channels: int = 3, + image_width: int = 40, + image_height: int = 40, + sampling_rate: int = 22050, + time_duration: float = 5.0, + frequency: int = 220, + tokenizer: "PreTrainedTokenizerBase" = None, + ) -> Mapping[str, Any]: + """ + Generate inputs to provide to the ONNX exporter for the specific framework + + Args: + preprocessor: ([`PreTrainedTokenizerBase`], [`FeatureExtractionMixin`], or [`ImageProcessingMixin`]): + The preprocessor associated with this model configuration. + batch_size (`int`, *optional*, defaults to -1): + The batch size to export the model for (-1 means dynamic axis). 
+ num_choices (`int`, *optional*, defaults to -1): + The number of candidate answers provided for multiple choice task (-1 means dynamic axis). + seq_length (`int`, *optional*, defaults to -1): + The sequence length to export the model for (-1 means dynamic axis). + is_pair (`bool`, *optional*, defaults to `False`): + Indicate if the input is a pair (sentence 1, sentence 2) + framework (`TensorType`, *optional*, defaults to `None`): + The framework (PyTorch or TensorFlow) that the tokenizer will generate tensors for. + num_channels (`int`, *optional*, defaults to 3): + The number of channels of the generated images. + image_width (`int`, *optional*, defaults to 40): + The width of the generated images. + image_height (`int`, *optional*, defaults to 40): + The height of the generated images. + sampling_rate (`int`, *optional* defaults to 22050) + The sampling rate for audio data generation. + time_duration (`float`, *optional* defaults to 5.0) + Total seconds of sampling for audio data generation. + frequency (`int`, *optional* defaults to 220) + The desired natural frequency of generated audio. + + Returns: + Mapping[str, Tensor] holding the kwargs to provide to the model's forward function + """ + from ..feature_extraction_utils import FeatureExtractionMixin + from ..image_processing_utils import ImageProcessingMixin + from ..tokenization_utils_base import PreTrainedTokenizerBase + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to generate dummy inputs.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.warning("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + if isinstance(preprocessor, PreTrainedTokenizerBase): + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension( + batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 + ) + # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX + token_to_add = preprocessor.num_special_tokens_to_add(is_pair) + seq_length = compute_effective_axis_dimension( + seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add + ) + # Generate dummy inputs according to compute batch and sequence + input_token = ( + preprocessor.unk_token + if (preprocessor.unk_token is not None and len(preprocessor.unk_token) > 0) + else "0" + ) + dummy_input = [" ".join([input_token]) * seq_length] * batch_size + if self.task == "multiple-choice": + # If dynamic axis (-1) we forward with a fixed dimension of 4 candidate answers to avoid optimizations + # made by ONNX + num_choices = compute_effective_axis_dimension( + num_choices, fixed_dimension=OnnxConfig.default_fixed_num_choices, num_token_to_add=0 + ) + dummy_input = dummy_input * num_choices + # The shape of the tokenized inputs values is [batch_size * num_choices, seq_length] + tokenized_input = preprocessor(dummy_input, text_pair=dummy_input) + # Unflatten the tokenized inputs values expanding it to the shape [batch_size, num_choices, seq_length] + for k, v in tokenized_input.items(): + tokenized_input[k] = [v[i : i + num_choices] for i in range(0, len(v), num_choices)] + return 
dict(tokenized_input.convert_to_tensors(tensor_type=framework)) + return dict(preprocessor(dummy_input, return_tensors=framework)) + elif isinstance(preprocessor, ImageProcessingMixin): + if preprocessor.model_input_names[0] != "pixel_values": + raise ValueError( + f"The `preprocessor` is an image processor ({preprocessor.__class__.__name__}) and expects" + f' `model_input_names[0]` to be "pixel_values", but got {preprocessor.model_input_names[0]}' + ) + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) + dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) + return dict(preprocessor(images=dummy_input, return_tensors=framework)) + elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values": + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) + dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width) + return dict(preprocessor(images=dummy_input, return_tensors=framework)) + elif ( + isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "input_features" + ): + # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX + batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch) + dummy_input = self._generate_dummy_audio(batch_size, sampling_rate, time_duration, frequency) + return dict(preprocessor(dummy_input, return_tensors=framework)) + else: + raise ValueError( + "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor." + ) + + def generate_dummy_inputs_onnxruntime(self, reference_model_inputs: Mapping[str, Any]) -> Mapping[str, Any]: + """ + Generate inputs for ONNX Runtime using the reference model inputs. Override this to run inference with seq2seq + models which have the encoder and decoder exported as separate ONNX files. + + Args: + reference_model_inputs ([`Mapping[str, Tensor]`): + Reference inputs for the model. + + Returns: + `Mapping[str, Tensor]`: The mapping holding the kwargs to provide to the model's forward function + """ + return reference_model_inputs + + def patch_ops(self): + for spec in self._patching_specs: + custom_op = spec.custom_op if spec.op_wrapper is None else spec.op_wrapper(spec.custom_op) + setattr(spec.o, spec.name, custom_op) + + def restore_ops(self): + for spec in self._patching_specs: + orig_op = spec.orig_op if spec.op_wrapper is None else spec.op_wrapper(spec.orig_op) + setattr(spec.o, spec.name, orig_op) + + @classmethod + def flatten_output_collection_property(cls, name: str, field: Iterable[Any]) -> Dict[str, Any]: + """ + Flatten any potential nested structure expanding the name of the field with the index of the element within the + structure. + + Args: + name: The name of the nested structure + field: The structure to, potentially, be flattened + + Returns: + (Dict[str, Any]): Outputs with flattened structure and key mapping this new structure. 
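Putting the pieces above together, `generate_dummy_inputs` is what produces the tensors fed to the exporter. A sketch that uses `FeaturesManager` to resolve the right `OnnxConfig` for a checkpoint (placeholder name) and asks it for PyTorch dummy inputs; the exact shapes depend on the tokenizer's special tokens:

```python
from transformers import AutoModel, AutoTokenizer
from transformers.onnx import FeaturesManager
from transformers.utils import TensorType

ckpt = "distilbert-base-uncased"  # placeholder
model = AutoModel.from_pretrained(ckpt)
tokenizer = AutoTokenizer.from_pretrained(ckpt)

model_kind, onnx_config_ctor = FeaturesManager.check_supported_model_or_raise(model, feature="default")
onnx_config = onnx_config_ctor(model.config)

dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
print({name: tuple(t.shape) for name, t in dummy_inputs.items()})
# e.g. {'input_ids': (2, 8), 'attention_mask': (2, 8)} -- fixed batch of 2, fixed sequence of 8
```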
+ + """ + from itertools import chain + + return {f"{name}.{idx}": item for idx, item in enumerate(chain.from_iterable(field))} + + +class OnnxConfigWithPast(OnnxConfig, ABC): + def __init__( + self, + config: "PretrainedConfig", + task: str = "default", + patching_specs: List[PatchingSpec] = None, + use_past: bool = False, + ): + super().__init__(config, task=task, patching_specs=patching_specs) + self.use_past = use_past + + @classmethod + def with_past(cls, config: "PretrainedConfig", task: str = "default") -> "OnnxConfigWithPast": + """ + Instantiate a OnnxConfig with `use_past` attribute set to True + + Args: + config: The underlying model's config to use when exporting to ONNX + + Returns: + OnnxConfig with `.use_past = True` + """ + return cls(config, task=task, use_past=True) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + common_outputs = super().outputs + if self.use_past: + self.fill_with_past_key_values_(common_outputs, direction="outputs") + + return common_outputs + + @property + def values_override(self) -> Optional[Mapping[str, Any]]: + if hasattr(self._config, "use_cache"): + return {"use_cache": self.use_past} + + return None + + @property + def num_layers(self) -> int: + """ + The number of layers attribute retrieved from the model config. Override this for model configs where the + number of layers attribute is not called `num_layers`. + """ + if not hasattr(self._config, "num_layers"): + raise AttributeError( + "could not find the number of layers attribute in the model configuration, override the num_layers" + " property of the model OnnxConfig to solve this" + ) + return self._config.num_layers + + @property + def num_attention_heads(self) -> int: + """ + The number of attention heads attribute retrieved from the model config. Override this for model configs where + the number of attention heads attribute is not called `num_attention_heads`. + """ + if not hasattr(self._config, "num_attention_heads"): + raise AttributeError( + "could not find the number of attention heads attribute in the model configuration, override the" + " num_attention_heads property of the model OnnxConfig to solve this" + ) + return self._config.num_attention_heads + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + # TODO: should we set seq_length = 1 when self.use_past = True? 
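A small illustration of `OnnxConfigWithPast`: a stub `PretrainedConfig` (arbitrary toy sizes) and a minimal subclass show how `with_past` plus `fill_with_past_key_values_` expose the cache as named ONNX inputs and flip `use_cache` via `values_override`:

```python
from collections import OrderedDict

from transformers import PretrainedConfig
from transformers.onnx import OnnxConfigWithPast

# Arbitrary toy sizes; PretrainedConfig stores unknown kwargs as plain attributes.
stub_config = PretrainedConfig(num_layers=2, num_attention_heads=4, hidden_size=32, use_cache=True)


class TinyCausalOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self):
        common = OrderedDict(
            [("input_ids", {0: "batch", 1: "sequence"}), ("attention_mask", {0: "batch", 1: "sequence"})]
        )
        if self.use_past:
            self.fill_with_past_key_values_(common, direction="inputs")
        return common


onnx_config = TinyCausalOnnxConfig.with_past(stub_config, task="causal-lm")
print(list(onnx_config.inputs))
# ['input_ids', 'attention_mask', 'past_key_values.0.key', 'past_key_values.0.value',
#  'past_key_values.1.key', 'past_key_values.1.value']
print(onnx_config.values_override)  # {'use_cache': True}
```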
+ common_inputs = super().generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + + batch, seqlen = common_inputs["input_ids"].shape + # Not using the same length for past_key_values + past_key_values_length = seqlen + 2 + shape = ( + batch, + self.num_attention_heads, + past_key_values_length, + self._config.hidden_size // self.num_attention_heads, + ) + + if "attention_mask" in common_inputs: + mask_dtype = common_inputs["attention_mask"].dtype + common_inputs["attention_mask"] = torch.cat( + [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], + dim=1, + ) + + common_inputs["past_key_values"] = [] + for _ in range(self.num_layers): + common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) + + return common_inputs + + def fill_with_past_key_values_( + self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str, inverted_values_shape: bool = False + ): + """ + Fill the input_or_outputs mapping with past_key_values dynamic axes considering. + + Args: + inputs_or_outputs: The mapping to fill. + direction: either "inputs" or "outputs", it specifies whether input_or_outputs is the input mapping or the + output mapping, this is important for axes naming. + inverted_values_shape: + If `True`, store values on dynamic axis 1, else on axis 2. + + """ + if direction not in ["inputs", "outputs"]: + raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') + + name = "past_key_values" if direction == "inputs" else "present" + for i in range(self.num_layers): + inputs_or_outputs[f"{name}.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} + if inverted_values_shape: + inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 1: "past_sequence + sequence"} + else: + inputs_or_outputs[f"{name}.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} + + def _flatten_past_key_values_(self, flattened_output, name, idx, t): + flattened_output[f"{name}.{idx}.key"] = t[0] + flattened_output[f"{name}.{idx}.value"] = t[1] + + def flatten_output_collection_property(self, name: str, field: Iterable[Any]) -> Dict[str, Any]: + flattened_output = {} + if name in ["present", "past_key_values"]: + for idx, t in enumerate(field): + self._flatten_past_key_values_(flattened_output, name, idx, t) + else: + flattened_output = super().flatten_output_collection_property(name, field) + + return flattened_output + + +class OnnxSeq2SeqConfigWithPast(OnnxConfigWithPast): + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + common_outputs = super(OnnxConfigWithPast, self).outputs + # Renaming the outputs axes properly. 
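The dummy `past_key_values` built above follow the `(batch, num_heads, past_length, head_dim)` layout, with the attention mask widened to cover the fake past. A toy restatement with concrete, invented sizes:

```python
import torch

batch, num_heads, head_dim, num_layers = 2, 4, 8, 2
seq_len = 3
past_length = seq_len + 2  # "not using the same length for past_key_values"

attention_mask = torch.ones(batch, seq_len, dtype=torch.long)
attention_mask = torch.cat([attention_mask, torch.ones(batch, past_length, dtype=torch.long)], dim=1)

shape = (batch, num_heads, past_length, head_dim)
past_key_values = [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]

print(attention_mask.shape)          # torch.Size([2, 8])
print(past_key_values[0][0].shape)   # torch.Size([2, 4, 5, 8])
```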
+ for name, axes_names in common_outputs.items(): + sequence_name = "encoder_sequence" if "encoder" in name else "decoder_sequence" + for axis_idx, name in axes_names.items(): + if "sequence" in name: + axes_names[axis_idx] = sequence_name + # We reset the value as the order in common_outputs (OrderedDict) is lost otherwise + else: + axes_names[axis_idx] = name + if self.use_past: + self.fill_with_past_key_values_(common_outputs, direction="outputs") + + return common_outputs + + @property + def num_layers(self) -> Tuple[int]: + try: + num_layers = super().num_layers + num_layers = (num_layers, num_layers) + except AttributeError: + if hasattr(self._config, "encoder_layers") and hasattr(self._config, "decoder_layers"): + num_layers = (self._config.encoder_layers, self._config.decoder_layers) + else: + raise AttributeError( + "could not find the number of encoder and decoder layers attributes in the model configuration," + " override the num_layers property of the model OnnxConfig to solve this" + ) + + return num_layers + + @property + def num_attention_heads(self) -> Tuple[int]: + try: + num_attention_heads = super().num_attention_heads + num_attention_heads = (num_attention_heads, num_attention_heads) + except AttributeError: + if hasattr(self._config, "encoder_attention_heads") and hasattr(self._config, "decoder_attention_heads"): + num_attention_heads = (self._config.encoder_attention_heads, self._config.decoder_attention_heads) + else: + raise AttributeError( + "could not find the number of attention heads for the encoder and the decoder attributes in the" + " model configuration, override the num_attention_heads property of the model OnnxConfig to solve" + " this" + ) + return num_attention_heads + + def generate_dummy_inputs( + self, + tokenizer: "PreTrainedTokenizerBase", + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + encoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + # Generate decoder inputs + decoder_seq_length = seq_length if not self.use_past else 1 + decoder_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=decoder_seq_length, is_pair=is_pair, framework=framework + ) + decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} + common_inputs = dict(**encoder_inputs, **decoder_inputs) + + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + batch = common_inputs["input_ids"].shape[0] + encoder_seq_length = common_inputs["input_ids"].shape[1] + decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] + num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads + encoder_shape = ( + batch, + num_encoder_attention_heads, + encoder_seq_length, + self._config.hidden_size // num_encoder_attention_heads, + ) + decoder_shape = ( + batch, + num_decoder_attention_heads, + # Not using the same length for past_key_values + decoder_seq_length + 3, + self._config.hidden_size // num_decoder_attention_heads, + ) + + common_inputs["past_key_values"] = [] + # If the number of encoder and decoder layers are present in the model configuration, both are considered + num_encoder_layers, num_decoder_layers = self.num_layers + min_num_layers = 
min(num_encoder_layers, num_decoder_layers) + max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers + remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" + + for _ in range(min_num_layers): + # For encoder-decoder models, past_key_values contains pre-computed values for both the encoder and the + # decoder layers, hence a tuple of 4 tensors instead of 2 + common_inputs["past_key_values"].append( + ( + torch.zeros(decoder_shape), + torch.zeros(decoder_shape), + torch.zeros(encoder_shape), + torch.zeros(encoder_shape), + ) + ) + + # TODO: test this. + shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape + for _ in range(min_num_layers, max_num_layers): + common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) + + return common_inputs + + def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str): + if direction not in ["inputs", "outputs"]: + raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given') + + name = "past_key_values" if direction == "inputs" else "present" + + # If the number of encoder and decoder layers are present in the model configuration, both are considered + num_encoder_layers, num_decoder_layers = self.num_layers + min_num_layers = min(num_encoder_layers, num_decoder_layers) + max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers + remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" + + encoder_sequence = "past_encoder_sequence" + decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence" + + for i in range(min_num_layers): + inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence} + inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence} + inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence} + inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence} + + for i in range(min_num_layers, max_num_layers): + if remaining_side_name == "encoder": + axes_info = {0: "batch", 2: encoder_sequence} + else: + axes_info = {0: "batch", 2: decoder_sequence} + inputs_or_outputs[f"{name}.{i}.{remaining_side_name}.key"] = axes_info + + def _flatten_past_key_values_(self, flattened_output, name, idx, t): + flattened_output[f"{name}.{idx}.decoder.key"] = t[0] + flattened_output[f"{name}.{idx}.decoder.value"] = t[1] + flattened_output[f"{name}.{idx}.encoder.key"] = t[2] + flattened_output[f"{name}.{idx}.encoder.value"] = t[3] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/convert.py b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/convert.py new file mode 100644 index 0000000000000000000000000000000000000000..36cd3232d4ba454d70b0565c8f48c895c510536a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/convert.py @@ -0,0 +1,460 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings +from inspect import signature +from itertools import chain +from pathlib import Path +from typing import TYPE_CHECKING, Iterable, List, Tuple, Union + +import numpy as np +from packaging.version import Version, parse + +from ..tokenization_utils_base import PreTrainedTokenizerBase +from ..utils import ( + TensorType, + is_tf_available, + is_torch_available, + logging, +) +from .config import OnnxConfig + + +if is_torch_available(): + from ..modeling_utils import PreTrainedModel + +if is_tf_available(): + from ..modeling_tf_utils import TFPreTrainedModel + +if TYPE_CHECKING: + from ..feature_extraction_utils import FeatureExtractionMixin + from ..processing_utils import ProcessorMixin + from ..tokenization_utils import PreTrainedTokenizer + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +# This is the minimal required version to support some ONNX Runtime features +ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0") + + +def check_onnxruntime_requirements(minimum_version: Version): + """ + Check onnxruntime is installed and if the installed version match is recent enough + + Raises: + ImportError: If onnxruntime is not installed or too old version is found + """ + try: + import onnxruntime + + # Parse the version of the installed onnxruntime + ort_version = parse(onnxruntime.__version__) + + # We require 1.4.0 minimum + if ort_version < ORT_QUANTIZE_MINIMUM_VERSION: + raise ImportError( + f"We found an older version of onnxruntime ({onnxruntime.__version__}) " + f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n" + "Please update onnxruntime by running `pip install --upgrade onnxruntime`" + ) + + except ImportError: + raise ImportError( + "onnxruntime doesn't seem to be currently installed. " + "Please install the onnxruntime by running `pip install onnxruntime`" + " and relaunch the conversion." + ) + + +def export_pytorch( + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], + model: "PreTrainedModel", + config: OnnxConfig, + opset: int, + output: Path, + tokenizer: "PreTrainedTokenizer" = None, + device: str = "cpu", +) -> Tuple[List[str], List[str]]: + """ + Export a PyTorch model to an ONNX Intermediate Representation (IR) + + Args: + preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): + The preprocessor used for encoding the data. + model ([`PreTrainedModel`]): + The model to export. + config ([`~onnx.config.OnnxConfig`]): + The ONNX configuration associated with the exported model. + opset (`int`): + The version of the ONNX operator set to use. + output (`Path`): + Directory to store the exported ONNX model. + device (`str`, *optional*, defaults to `cpu`): + The device on which the ONNX model will be exported. Either `cpu` or `cuda`. + + Returns: + `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from + the ONNX configuration. 
+ """ + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + if issubclass(type(model), PreTrainedModel): + import torch + from torch.onnx import export as onnx_export + + logger.info(f"Using framework PyTorch: {torch.__version__}") + with torch.no_grad(): + model.config.return_dict = True + model.eval() + + # Check if we need to override certain configuration item + if config.values_override is not None: + logger.info(f"Overriding {len(config.values_override)} configuration item(s)") + for override_config_key, override_config_value in config.values_override.items(): + logger.info(f"\t- {override_config_key} -> {override_config_value}") + setattr(model.config, override_config_key, override_config_value) + + # Ensure inputs match + # TODO: Check when exporting QA we provide "is_pair=True" + model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.PYTORCH) + device = torch.device(device) + if device.type == "cuda" and torch.cuda.is_available(): + model.to(device) + model_inputs_device = {} + for k, v in model_inputs.items(): + if isinstance(v, Tuple): + model_inputs_device[k] = tuple( + x.to(device) if isinstance(x, torch.Tensor) else None for x in v + ) + elif isinstance(v, List): + model_inputs_device[k] = [ + tuple(x.to(device) if isinstance(x, torch.Tensor) else None for x in t) for t in v + ] + else: + model_inputs_device[k] = v.to(device) + + model_inputs = model_inputs_device + + inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) + onnx_outputs = list(config.outputs.keys()) + + if not inputs_match: + raise ValueError("Model and config inputs doesn't match") + + config.patch_ops() + + onnx_export( + model, + (model_inputs,), + f=output.as_posix(), + input_names=list(config.inputs.keys()), + output_names=onnx_outputs, + dynamic_axes=dict(chain(config.inputs.items(), config.outputs.items())), + do_constant_folding=True, + opset_version=opset, + ) + + config.restore_ops() + + return matched_inputs, onnx_outputs + + +def export_tensorflow( + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin"], + model: "TFPreTrainedModel", + config: OnnxConfig, + opset: int, + output: Path, + tokenizer: "PreTrainedTokenizer" = None, +) -> Tuple[List[str], List[str]]: + """ + Export a TensorFlow model to an ONNX Intermediate Representation (IR) + + Args: + preprocessor: ([`PreTrainedTokenizer`] or [`FeatureExtractionMixin`]): + The preprocessor used for encoding the data. + model ([`TFPreTrainedModel`]): + The model to export. + config ([`~onnx.config.OnnxConfig`]): + The ONNX configuration associated with the exported model. + opset (`int`): + The version of the ONNX operator set to use. + output (`Path`): + Directory to store the exported ONNX model. + + Returns: + `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from + the ONNX configuration. 
+ """ + import onnx + import tensorflow as tf + import tf2onnx + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and preprocessor to export the model.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + model.config.return_dict = True + + # Check if we need to override certain configuration item + if config.values_override is not None: + logger.info(f"Overriding {len(config.values_override)} configuration item(s)") + for override_config_key, override_config_value in config.values_override.items(): + logger.info(f"\t- {override_config_key} -> {override_config_value}") + setattr(model.config, override_config_key, override_config_value) + + # Ensure inputs match + model_inputs = config.generate_dummy_inputs(preprocessor, framework=TensorType.TENSORFLOW) + inputs_match, matched_inputs = ensure_model_and_config_inputs_match(model, model_inputs.keys()) + onnx_outputs = list(config.outputs.keys()) + + input_signature = [ + tf.TensorSpec([None] * tensor.ndim, dtype=tensor.dtype, name=key) for key, tensor in model_inputs.items() + ] + onnx_model, _ = tf2onnx.convert.from_keras(model, input_signature, opset=opset) + onnx.save(onnx_model, output.as_posix()) + config.restore_ops() + + return matched_inputs, onnx_outputs + + +def export( + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], + model: Union["PreTrainedModel", "TFPreTrainedModel"], + config: OnnxConfig, + opset: int, + output: Path, + tokenizer: "PreTrainedTokenizer" = None, + device: str = "cpu", +) -> Tuple[List[str], List[str]]: + """ + Export a Pytorch or TensorFlow model to an ONNX Intermediate Representation (IR) + + Args: + preprocessor: ([`PreTrainedTokenizer`], [`FeatureExtractionMixin`] or [`ProcessorMixin`]): + The preprocessor used for encoding the data. + model ([`PreTrainedModel`] or [`TFPreTrainedModel`]): + The model to export. + config ([`~onnx.config.OnnxConfig`]): + The ONNX configuration associated with the exported model. + opset (`int`): + The version of the ONNX operator set to use. + output (`Path`): + Directory to store the exported ONNX model. + device (`str`, *optional*, defaults to `cpu`): + The device on which the ONNX model will be exported. Either `cpu` or `cuda`. Only PyTorch is supported for + export on CUDA devices. + + Returns: + `Tuple[List[str], List[str]]`: A tuple with an ordered list of the model's inputs, and the named inputs from + the ONNX configuration. + """ + if not (is_torch_available() or is_tf_available()): + raise ImportError( + "Cannot convert because neither PyTorch nor TensorFlow are not installed. " + "Please install torch or tensorflow first." + ) + + if is_tf_available() and isinstance(model, TFPreTrainedModel) and device == "cuda": + raise RuntimeError("`tf2onnx` does not support export on CUDA device.") + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to export the model.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. 
Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + if is_torch_available(): + from ..utils import get_torch_version + + if not config.is_torch_support_available: + logger.warning( + f"Unsupported PyTorch version for this model. Minimum required is {config.torch_onnx_minimum_version}," + f" got: {get_torch_version()}" + ) + + if is_torch_available() and issubclass(type(model), PreTrainedModel): + return export_pytorch(preprocessor, model, config, opset, output, tokenizer=tokenizer, device=device) + elif is_tf_available() and issubclass(type(model), TFPreTrainedModel): + return export_tensorflow(preprocessor, model, config, opset, output, tokenizer=tokenizer) + + +def validate_model_outputs( + config: OnnxConfig, + preprocessor: Union["PreTrainedTokenizer", "FeatureExtractionMixin", "ProcessorMixin"], + reference_model: Union["PreTrainedModel", "TFPreTrainedModel"], + onnx_model: Path, + onnx_named_outputs: List[str], + atol: float, + tokenizer: "PreTrainedTokenizer" = None, +): + from onnxruntime import InferenceSession, SessionOptions + + logger.info("Validating ONNX model...") + + if isinstance(preprocessor, PreTrainedTokenizerBase) and tokenizer is not None: + raise ValueError("You cannot provide both a tokenizer and a preprocessor to validate the model outputs.") + if tokenizer is not None: + warnings.warn( + "The `tokenizer` argument is deprecated and will be removed in version 5 of Transformers. Use" + " `preprocessor` instead.", + FutureWarning, + ) + logger.info("Overwriting the `preprocessor` argument with `tokenizer` to generate dummmy inputs.") + preprocessor = tokenizer + + # generate inputs with a different batch_size and seq_len that was used for conversion to properly test + # dynamic input shapes. + if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): + reference_model_inputs = config.generate_dummy_inputs( + preprocessor, + batch_size=config.default_fixed_batch + 1, + seq_length=config.default_fixed_sequence + 1, + framework=TensorType.PYTORCH, + ) + else: + reference_model_inputs = config.generate_dummy_inputs( + preprocessor, + batch_size=config.default_fixed_batch + 1, + seq_length=config.default_fixed_sequence + 1, + framework=TensorType.TENSORFLOW, + ) + + # Create ONNX Runtime session + options = SessionOptions() + session = InferenceSession(onnx_model.as_posix(), options, providers=["CPUExecutionProvider"]) + + # Compute outputs from the reference model + if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): + reference_model.to("cpu") + ref_outputs = reference_model(**reference_model_inputs) + ref_outputs_dict = {} + + # We flatten potential collection of outputs (i.e. past_keys) to a flat structure + for name, value in ref_outputs.items(): + # Overwriting the output name as "present" since it is the name used for the ONNX outputs + # ("past_key_values" being taken for the ONNX inputs) + if name == "past_key_values": + name = "present" + if isinstance(value, (list, tuple)): + value = config.flatten_output_collection_property(name, value) + ref_outputs_dict.update(value) + else: + ref_outputs_dict[name] = value + + # Create onnxruntime inputs from the reference model inputs + reference_model_inputs_onnxruntime = config.generate_dummy_inputs_onnxruntime(reference_model_inputs) + + # We flatten potential collection of inputs (i.e. 
past_keys) + onnx_inputs = {} + for name, value in reference_model_inputs_onnxruntime.items(): + if isinstance(value, (list, tuple)): + value = config.flatten_output_collection_property(name, value) + onnx_inputs.update({tensor_name: pt_tensor.numpy() for tensor_name, pt_tensor in value.items()}) + else: + onnx_inputs[name] = value.numpy() + + # Compute outputs from the ONNX model + onnx_outputs = session.run(onnx_named_outputs, onnx_inputs) + + # Check we have a subset of the keys into onnx_outputs against ref_outputs + ref_outputs_set, onnx_outputs_set = set(ref_outputs_dict.keys()), set(onnx_named_outputs) + if not onnx_outputs_set.issubset(ref_outputs_set): + logger.info( + f"\t-[x] ONNX model output names {onnx_outputs_set} do not match reference model {ref_outputs_set}" + ) + + raise ValueError( + "Outputs doesn't match between reference model and ONNX exported model: " + f"{onnx_outputs_set.difference(ref_outputs_set)}" + ) + else: + logger.info(f"\t-[✓] ONNX model output names match reference model ({onnx_outputs_set})") + + # Check the shape and values match + for name, ort_value in zip(onnx_named_outputs, onnx_outputs): + if is_torch_available() and issubclass(type(reference_model), PreTrainedModel): + ref_value = ref_outputs_dict[name].detach().numpy() + else: + ref_value = ref_outputs_dict[name].numpy() + logger.info(f'\t- Validating ONNX Model output "{name}":') + + # Shape + if not ort_value.shape == ref_value.shape: + logger.info(f"\t\t-[x] shape {ort_value.shape} doesn't match {ref_value.shape}") + raise ValueError( + "Outputs shape doesn't match between reference model and ONNX exported model: " + f"Got {ref_value.shape} (reference) and {ort_value.shape} (ONNX)" + ) + else: + logger.info(f"\t\t-[✓] {ort_value.shape} matches {ref_value.shape}") + + # Values + if not np.allclose(ref_value, ort_value, atol=atol): + bad_indices = np.logical_not(np.isclose(ref_value, ort_value, atol=atol)) + logger.info(f"\t\t-[x] values not close enough (atol: {atol})") + raise ValueError( + "Outputs values doesn't match between reference model and ONNX exported model: " + f"Got max absolute difference of: {np.amax(np.abs(ref_value - ort_value))} for " + f"{ref_value[bad_indices]} vs {ort_value[bad_indices]}" + ) + else: + logger.info(f"\t\t-[✓] all values close (atol: {atol})") + + +def ensure_model_and_config_inputs_match( + model: Union["PreTrainedModel", "TFPreTrainedModel"], model_inputs: Iterable[str] +) -> Tuple[bool, List[str]]: + """ + + :param model_inputs: :param config_inputs: :return: + """ + if is_torch_available() and issubclass(type(model), PreTrainedModel): + forward_parameters = signature(model.forward).parameters + else: + forward_parameters = signature(model.call).parameters + model_inputs_set = set(model_inputs) + + # We are fine if config_inputs has more keys than model_inputs + forward_inputs_set = set(forward_parameters.keys()) + is_ok = model_inputs_set.issubset(forward_inputs_set) + + # Make sure the input order match (VERY IMPORTANT !!!!) 
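Taken together, `export` and `validate_model_outputs` above form the usual conversion workflow. The following is a minimal, non-authoritative sketch of wiring them up for a PyTorch checkpoint: the checkpoint name is a placeholder, `torch` and `onnxruntime` are assumed to be installed, and `FeaturesManager` is the helper defined in `features.py` further down in this patch.

```python
from pathlib import Path

from transformers import AutoModel, AutoTokenizer
from transformers.onnx import FeaturesManager, export, validate_model_outputs

checkpoint = "distilbert-base-uncased"  # placeholder checkpoint
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModel.from_pretrained(checkpoint)

# Resolve the OnnxConfig constructor for this model type and feature
model_kind, onnx_config_factory = FeaturesManager.check_supported_model_or_raise(model, feature="default")
onnx_config = onnx_config_factory(model.config)

# Export, then compare the ONNX Runtime outputs against the reference model
onnx_path = Path("model.onnx")
onnx_inputs, onnx_outputs = export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, onnx_path)
validate_model_outputs(onnx_config, tokenizer, model, onnx_path, onnx_outputs, onnx_config.atol_for_validation)
```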
+ matching_inputs = forward_inputs_set.intersection(model_inputs_set) + ordered_inputs = [parameter for parameter in forward_parameters.keys() if parameter in matching_inputs] + return is_ok, ordered_inputs diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/features.py b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/features.py new file mode 100644 index 0000000000000000000000000000000000000000..1b0bf23d61213cf94a2c1e04650103a70cab4cfb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/features.py @@ -0,0 +1,749 @@ +import os +from functools import partial, reduce +from typing import TYPE_CHECKING, Callable, Dict, Optional, Tuple, Type, Union + +import transformers + +from .. import PretrainedConfig, is_tf_available, is_torch_available +from ..utils import TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging +from .config import OnnxConfig + + +if TYPE_CHECKING: + from transformers import PreTrainedModel, TFPreTrainedModel + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + +if is_torch_available(): + from transformers.models.auto import ( + AutoModel, + AutoModelForCausalLM, + AutoModelForImageClassification, + AutoModelForImageSegmentation, + AutoModelForMaskedImageModeling, + AutoModelForMaskedLM, + AutoModelForMultipleChoice, + AutoModelForObjectDetection, + AutoModelForQuestionAnswering, + AutoModelForSemanticSegmentation, + AutoModelForSeq2SeqLM, + AutoModelForSequenceClassification, + AutoModelForSpeechSeq2Seq, + AutoModelForTokenClassification, + AutoModelForVision2Seq, + ) +if is_tf_available(): + from transformers.models.auto import ( + TFAutoModel, + TFAutoModelForCausalLM, + TFAutoModelForMaskedLM, + TFAutoModelForMultipleChoice, + TFAutoModelForQuestionAnswering, + TFAutoModelForSemanticSegmentation, + TFAutoModelForSeq2SeqLM, + TFAutoModelForSequenceClassification, + TFAutoModelForTokenClassification, + ) +if not is_torch_available() and not is_tf_available(): + logger.warning( + "The ONNX export features are only supported for PyTorch or TensorFlow. You will not be able to export models" + " without one of these libraries installed." + ) + + +def supported_features_mapping( + *supported_features: str, onnx_config_cls: str = None +) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: + """ + Generate the mapping between supported the features and their corresponding OnnxConfig for a given model. + + Args: + *supported_features: The names of the supported features. + onnx_config_cls: The OnnxConfig full name corresponding to the model. + + Returns: + The dictionary mapping a feature to an OnnxConfig constructor. 
+ """ + if onnx_config_cls is None: + raise ValueError("A OnnxConfig class must be provided") + + config_cls = transformers + for attr_name in onnx_config_cls.split("."): + config_cls = getattr(config_cls, attr_name) + mapping = {} + for feature in supported_features: + if "-with-past" in feature: + task = feature.replace("-with-past", "") + mapping[feature] = partial(config_cls.with_past, task=task) + else: + mapping[feature] = partial(config_cls.from_model_config, task=feature) + + return mapping + + +class FeaturesManager: + _TASKS_TO_AUTOMODELS = {} + _TASKS_TO_TF_AUTOMODELS = {} + if is_torch_available(): + _TASKS_TO_AUTOMODELS = { + "default": AutoModel, + "masked-lm": AutoModelForMaskedLM, + "causal-lm": AutoModelForCausalLM, + "seq2seq-lm": AutoModelForSeq2SeqLM, + "sequence-classification": AutoModelForSequenceClassification, + "token-classification": AutoModelForTokenClassification, + "multiple-choice": AutoModelForMultipleChoice, + "object-detection": AutoModelForObjectDetection, + "question-answering": AutoModelForQuestionAnswering, + "image-classification": AutoModelForImageClassification, + "image-segmentation": AutoModelForImageSegmentation, + "masked-im": AutoModelForMaskedImageModeling, + "semantic-segmentation": AutoModelForSemanticSegmentation, + "vision2seq-lm": AutoModelForVision2Seq, + "speech2seq-lm": AutoModelForSpeechSeq2Seq, + } + if is_tf_available(): + _TASKS_TO_TF_AUTOMODELS = { + "default": TFAutoModel, + "masked-lm": TFAutoModelForMaskedLM, + "causal-lm": TFAutoModelForCausalLM, + "seq2seq-lm": TFAutoModelForSeq2SeqLM, + "sequence-classification": TFAutoModelForSequenceClassification, + "token-classification": TFAutoModelForTokenClassification, + "multiple-choice": TFAutoModelForMultipleChoice, + "question-answering": TFAutoModelForQuestionAnswering, + "semantic-segmentation": TFAutoModelForSemanticSegmentation, + } + + # Set of model topologies we support associated to the features supported by each topology and the factory + _SUPPORTED_MODEL_TYPE = { + "albert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.albert.AlbertOnnxConfig", + ), + "bart": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "sequence-classification", + "question-answering", + onnx_config_cls="models.bart.BartOnnxConfig", + ), + # BEiT cannot be used with the masked image modeling autoclass, so this feature is excluded here + "beit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.beit.BeitOnnxConfig" + ), + "bert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.bert.BertOnnxConfig", + ), + "big-bird": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.big_bird.BigBirdOnnxConfig", + ), + "bigbird-pegasus": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "sequence-classification", + "question-answering", + onnx_config_cls="models.bigbird_pegasus.BigBirdPegasusOnnxConfig", + ), + "blenderbot": supported_features_mapping( + 
"default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.blenderbot.BlenderbotOnnxConfig", + ), + "blenderbot-small": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.blenderbot_small.BlenderbotSmallOnnxConfig", + ), + "bloom": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "sequence-classification", + "token-classification", + onnx_config_cls="models.bloom.BloomOnnxConfig", + ), + "camembert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.camembert.CamembertOnnxConfig", + ), + "clip": supported_features_mapping( + "default", + onnx_config_cls="models.clip.CLIPOnnxConfig", + ), + "codegen": supported_features_mapping( + "default", + "causal-lm", + onnx_config_cls="models.codegen.CodeGenOnnxConfig", + ), + "convbert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.convbert.ConvBertOnnxConfig", + ), + "convnext": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.convnext.ConvNextOnnxConfig", + ), + "data2vec-text": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.data2vec.Data2VecTextOnnxConfig", + ), + "data2vec-vision": supported_features_mapping( + "default", + "image-classification", + # ONNX doesn't support `adaptive_avg_pool2d` yet + # "semantic-segmentation", + onnx_config_cls="models.data2vec.Data2VecVisionOnnxConfig", + ), + "deberta": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "token-classification", + "question-answering", + onnx_config_cls="models.deberta.DebertaOnnxConfig", + ), + "deberta-v2": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.deberta_v2.DebertaV2OnnxConfig", + ), + "deit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.deit.DeiTOnnxConfig" + ), + "detr": supported_features_mapping( + "default", + "object-detection", + "image-segmentation", + onnx_config_cls="models.detr.DetrOnnxConfig", + ), + "distilbert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.distilbert.DistilBertOnnxConfig", + ), + "electra": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.electra.ElectraOnnxConfig", + ), + "flaubert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.flaubert.FlaubertOnnxConfig", + ), + "gpt2": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + 
"sequence-classification", + "token-classification", + onnx_config_cls="models.gpt2.GPT2OnnxConfig", + ), + "gptj": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "question-answering", + "sequence-classification", + onnx_config_cls="models.gptj.GPTJOnnxConfig", + ), + "gpt-neo": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "sequence-classification", + onnx_config_cls="models.gpt_neo.GPTNeoOnnxConfig", + ), + "groupvit": supported_features_mapping( + "default", + onnx_config_cls="models.groupvit.GroupViTOnnxConfig", + ), + "ibert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.ibert.IBertOnnxConfig", + ), + "imagegpt": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.imagegpt.ImageGPTOnnxConfig" + ), + "layoutlm": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "token-classification", + onnx_config_cls="models.layoutlm.LayoutLMOnnxConfig", + ), + "layoutlmv3": supported_features_mapping( + "default", + "question-answering", + "sequence-classification", + "token-classification", + onnx_config_cls="models.layoutlmv3.LayoutLMv3OnnxConfig", + ), + "levit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.levit.LevitOnnxConfig" + ), + "longt5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.longt5.LongT5OnnxConfig", + ), + "longformer": supported_features_mapping( + "default", + "masked-lm", + "multiple-choice", + "question-answering", + "sequence-classification", + "token-classification", + onnx_config_cls="models.longformer.LongformerOnnxConfig", + ), + "marian": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "causal-lm", + "causal-lm-with-past", + onnx_config_cls="models.marian.MarianOnnxConfig", + ), + "mbart": supported_features_mapping( + "default", + "default-with-past", + "causal-lm", + "causal-lm-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + "sequence-classification", + "question-answering", + onnx_config_cls="models.mbart.MBartOnnxConfig", + ), + "mobilebert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.mobilebert.MobileBertOnnxConfig", + ), + "mobilenet-v1": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.mobilenet_v1.MobileNetV1OnnxConfig", + ), + "mobilenet-v2": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.mobilenet_v2.MobileNetV2OnnxConfig", + ), + "mobilevit": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.mobilevit.MobileViTOnnxConfig", + ), + "mt5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.mt5.MT5OnnxConfig", + ), + "m2m-100": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.m2m_100.M2M100OnnxConfig", + ), + "owlvit": supported_features_mapping( + "default", + onnx_config_cls="models.owlvit.OwlViTOnnxConfig", + ), + 
"perceiver": supported_features_mapping( + "image-classification", + "masked-lm", + "sequence-classification", + onnx_config_cls="models.perceiver.PerceiverOnnxConfig", + ), + "poolformer": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.poolformer.PoolFormerOnnxConfig" + ), + "rembert": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.rembert.RemBertOnnxConfig", + ), + "resnet": supported_features_mapping( + "default", + "image-classification", + onnx_config_cls="models.resnet.ResNetOnnxConfig", + ), + "roberta": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.roberta.RobertaOnnxConfig", + ), + "roformer": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "token-classification", + "multiple-choice", + "question-answering", + "token-classification", + onnx_config_cls="models.roformer.RoFormerOnnxConfig", + ), + "segformer": supported_features_mapping( + "default", + "image-classification", + "semantic-segmentation", + onnx_config_cls="models.segformer.SegformerOnnxConfig", + ), + "squeezebert": supported_features_mapping( + "default", + "masked-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.squeezebert.SqueezeBertOnnxConfig", + ), + "swin": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.swin.SwinOnnxConfig" + ), + "t5": supported_features_mapping( + "default", + "default-with-past", + "seq2seq-lm", + "seq2seq-lm-with-past", + onnx_config_cls="models.t5.T5OnnxConfig", + ), + "vision-encoder-decoder": supported_features_mapping( + "vision2seq-lm", onnx_config_cls="models.vision_encoder_decoder.VisionEncoderDecoderOnnxConfig" + ), + "vit": supported_features_mapping( + "default", "image-classification", onnx_config_cls="models.vit.ViTOnnxConfig" + ), + "whisper": supported_features_mapping( + "default", + "default-with-past", + "speech2seq-lm", + "speech2seq-lm-with-past", + onnx_config_cls="models.whisper.WhisperOnnxConfig", + ), + "xlm": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.xlm.XLMOnnxConfig", + ), + "xlm-roberta": supported_features_mapping( + "default", + "masked-lm", + "causal-lm", + "sequence-classification", + "multiple-choice", + "token-classification", + "question-answering", + onnx_config_cls="models.xlm_roberta.XLMRobertaOnnxConfig", + ), + "yolos": supported_features_mapping( + "default", + "object-detection", + onnx_config_cls="models.yolos.YolosOnnxConfig", + ), + } + + AVAILABLE_FEATURES = sorted(reduce(lambda s1, s2: s1 | s2, (v.keys() for v in _SUPPORTED_MODEL_TYPE.values()))) + + @staticmethod + def get_supported_features_for_model_type( + model_type: str, model_name: Optional[str] = None + ) -> Dict[str, Callable[[PretrainedConfig], OnnxConfig]]: + """ + Tries to retrieve the feature -> OnnxConfig constructor map from the model type. + + Args: + model_type (`str`): + The model type to retrieve the supported features for. 
+ model_name (`str`, *optional*): + The name attribute of the model object, only used for the exception message. + + Returns: + The dictionary mapping each feature to a corresponding OnnxConfig constructor. + """ + model_type = model_type.lower() + if model_type not in FeaturesManager._SUPPORTED_MODEL_TYPE: + model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type + raise KeyError( + f"{model_type_and_model_name} is not supported yet. " + f"Only {list(FeaturesManager._SUPPORTED_MODEL_TYPE.keys())} are supported. " + f"If you want to support {model_type} please propose a PR or open up an issue." + ) + return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type] + + @staticmethod + def feature_to_task(feature: str) -> str: + return feature.replace("-with-past", "") + + @staticmethod + def _validate_framework_choice(framework: str): + """ + Validates if the framework requested for the export is both correct and available, otherwise throws an + exception. + """ + if framework not in ["pt", "tf"]: + raise ValueError( + f"Only two frameworks are supported for ONNX export: pt or tf, but {framework} was provided." + ) + elif framework == "pt" and not is_torch_available(): + raise RuntimeError("Cannot export model to ONNX using PyTorch because no PyTorch package was found.") + elif framework == "tf" and not is_tf_available(): + raise RuntimeError("Cannot export model to ONNX using TensorFlow because no TensorFlow package was found.") + + @staticmethod + def get_model_class_for_feature(feature: str, framework: str = "pt") -> Type: + """ + Attempts to retrieve an AutoModel class from a feature name. + + Args: + feature (`str`): + The feature required. + framework (`str`, *optional*, defaults to `"pt"`): + The framework to use for the export. + + Returns: + The AutoModel class corresponding to the feature. + """ + task = FeaturesManager.feature_to_task(feature) + FeaturesManager._validate_framework_choice(framework) + if framework == "pt": + task_to_automodel = FeaturesManager._TASKS_TO_AUTOMODELS + else: + task_to_automodel = FeaturesManager._TASKS_TO_TF_AUTOMODELS + if task not in task_to_automodel: + raise KeyError( + f"Unknown task: {feature}. Possible values are {list(FeaturesManager._TASKS_TO_AUTOMODELS.values())}" + ) + + return task_to_automodel[task] + + @staticmethod + def determine_framework(model: str, framework: str = None) -> str: + """ + Determines the framework to use for the export. + + The priority is in the following order: + 1. User input via `framework`. + 2. If local checkpoint is provided, use the same framework as the checkpoint. + 3. Available framework in environment, with priority given to PyTorch + + Args: + model (`str`): + The name of the model to export. + framework (`str`, *optional*, defaults to `None`): + The framework to use for the export. See above for priority if none provided. + + Returns: + The framework to use for the export. + + """ + if framework is not None: + return framework + + framework_map = {"pt": "PyTorch", "tf": "TensorFlow"} + exporter_map = {"pt": "torch", "tf": "tf2onnx"} + + if os.path.isdir(model): + if os.path.isfile(os.path.join(model, WEIGHTS_NAME)): + framework = "pt" + elif os.path.isfile(os.path.join(model, TF2_WEIGHTS_NAME)): + framework = "tf" + else: + raise FileNotFoundError( + "Cannot determine framework from given checkpoint location." + f" There should be a {WEIGHTS_NAME} for PyTorch" + f" or {TF2_WEIGHTS_NAME} for TensorFlow." 
+ ) + logger.info(f"Local {framework_map[framework]} model found.") + else: + if is_torch_available(): + framework = "pt" + elif is_tf_available(): + framework = "tf" + else: + raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export to ONNX.") + + logger.info(f"Framework not requested. Using {exporter_map[framework]} to export to ONNX.") + + return framework + + @staticmethod + def get_model_from_feature( + feature: str, model: str, framework: str = None, cache_dir: str = None + ) -> Union["PreTrainedModel", "TFPreTrainedModel"]: + """ + Attempts to retrieve a model from a model's name and the feature to be enabled. + + Args: + feature (`str`): + The feature required. + model (`str`): + The name of the model to export. + framework (`str`, *optional*, defaults to `None`): + The framework to use for the export. See `FeaturesManager.determine_framework` for the priority should + none be provided. + + Returns: + The instance of the model. + + """ + framework = FeaturesManager.determine_framework(model, framework) + model_class = FeaturesManager.get_model_class_for_feature(feature, framework) + try: + model = model_class.from_pretrained(model, cache_dir=cache_dir) + except OSError: + if framework == "pt": + logger.info("Loading TensorFlow model in PyTorch before exporting to ONNX.") + model = model_class.from_pretrained(model, from_tf=True, cache_dir=cache_dir) + else: + logger.info("Loading PyTorch model in TensorFlow before exporting to ONNX.") + model = model_class.from_pretrained(model, from_pt=True, cache_dir=cache_dir) + return model + + @staticmethod + def check_supported_model_or_raise( + model: Union["PreTrainedModel", "TFPreTrainedModel"], feature: str = "default" + ) -> Tuple[str, Callable]: + """ + Check whether or not the model has the requested features. + + Args: + model: The model to export. + feature: The name of the feature to check if it is available. + + Returns: + (str) The type of the model (OnnxConfig) The OnnxConfig instance holding the model export properties. + + """ + model_type = model.config.model_type.replace("_", "-") + model_name = getattr(model, "name", "") + model_features = FeaturesManager.get_supported_features_for_model_type(model_type, model_name=model_name) + if feature not in model_features: + raise ValueError( + f"{model.config.model_type} doesn't support feature {feature}. Supported values are: {model_features}" + ) + + return model.config.model_type, FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature] + + def get_config(model_type: str, feature: str) -> OnnxConfig: + """ + Gets the OnnxConfig for a model_type and feature combination. + + Args: + model_type (`str`): + The model type to retrieve the config for. + feature (`str`): + The feature to retrieve the config for. + + Returns: + `OnnxConfig`: config for the combination + """ + return FeaturesManager._SUPPORTED_MODEL_TYPE[model_type][feature] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/onnx/utils.py b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9672b0a96af88ffa2c7e791d1f4d7c818174247f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/onnx/utils.py @@ -0,0 +1,109 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
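Before the export entry point is reached, `FeaturesManager` is what maps a checkpoint and a feature name to the matching auto-class and `OnnxConfig`. A short sketch under the assumption of illustrative checkpoint names; `get_preprocessor` is the helper from the `onnx/utils.py` module added just below.

```python
from transformers.onnx import FeaturesManager
from transformers.onnx.utils import get_preprocessor

# Features exportable for a given model type
print(list(FeaturesManager.get_supported_features_for_model_type("distilbert")))
# e.g. ['default', 'masked-lm', 'sequence-classification', 'multiple-choice',
#       'token-classification', 'question-answering']

# Load the checkpoint with the auto-class matching the requested feature
model = FeaturesManager.get_model_from_feature(
    "sequence-classification", "distilbert-base-uncased-finetuned-sst-2-english"
)
model_kind, onnx_config_factory = FeaturesManager.check_supported_model_or_raise(
    model, feature="sequence-classification"
)
onnx_config = onnx_config_factory(model.config)

# Resolve the tokenizer / feature extractor / processor for the same checkpoint
preprocessor = get_preprocessor("distilbert-base-uncased-finetuned-sst-2-english")
```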
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ctypes import c_float, sizeof +from enum import Enum +from typing import TYPE_CHECKING, Optional, Union + + +if TYPE_CHECKING: + from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore + + +class ParameterFormat(Enum): + Float = c_float + + @property + def size(self) -> int: + """ + Number of byte required for this data type + + Returns: + Integer > 0 + """ + return sizeof(self.value) + + +def compute_effective_axis_dimension(dimension: int, fixed_dimension: int, num_token_to_add: int = 0) -> int: + """ + + Args: + dimension: + fixed_dimension: + num_token_to_add: + + Returns: + + """ + # < 0 is possible if using a dynamic axis + if dimension <= 0: + dimension = fixed_dimension + + dimension -= num_token_to_add + return dimension + + +def compute_serialized_parameters_size(num_parameters: int, dtype: ParameterFormat) -> int: + """ + Compute the size taken by all the parameters in the given the storage format when serializing the model + + Args: + num_parameters: Number of parameters to be saved + dtype: The data format each parameter will be saved + + Returns: + Size (in byte) taken to save all the parameters + """ + return num_parameters * dtype.size + + +def get_preprocessor(model_name: str) -> Optional[Union["AutoTokenizer", "AutoFeatureExtractor", "AutoProcessor"]]: + """ + Gets a preprocessor (tokenizer, feature extractor or processor) that is available for `model_name`. + + Args: + model_name (`str`): Name of the model for which a preprocessor are loaded. + + Returns: + `Optional[Union[AutoTokenizer, AutoFeatureExtractor, AutoProcessor]]`: + If a processor is found, it is returned. Otherwise, if a tokenizer or a feature extractor exists, it is + returned. If both a tokenizer and a feature extractor exist, an error is raised. The function returns + `None` if no preprocessor is found. + """ + # Avoid circular imports by only importing this here. + from .. import AutoFeatureExtractor, AutoProcessor, AutoTokenizer # tests_ignore + + try: + return AutoProcessor.from_pretrained(model_name) + except (ValueError, OSError, KeyError): + tokenizer, feature_extractor = None, None + try: + tokenizer = AutoTokenizer.from_pretrained(model_name) + except (OSError, KeyError): + pass + try: + feature_extractor = AutoFeatureExtractor.from_pretrained(model_name) + except (OSError, KeyError): + pass + + if tokenizer is not None and feature_extractor is not None: + raise ValueError( + f"Couldn't auto-detect preprocessor for {model_name}. Found both a tokenizer and a feature extractor." 
+ ) + elif tokenizer is None and feature_extractor is None: + return None + elif tokenizer is not None: + return tokenizer + else: + return feature_extractor diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text_generation.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..0b358291717ee0281c28c009bfbb9a0144d68457 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/text_generation.py @@ -0,0 +1,374 @@ +import enum +import warnings +from typing import Dict + +from ..utils import add_end_docstrings, is_tf_available, is_torch_available +from .base import Pipeline, build_pipeline_init_args + + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES + +if is_tf_available(): + import tensorflow as tf + + from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES + + +class ReturnType(enum.Enum): + TENSORS = 0 + NEW_TEXT = 1 + FULL_TEXT = 2 + + +class Chat: + """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats + to this format because the rest of the pipeline code tends to assume that lists of messages are + actually a batch of samples rather than messages in the same conversation.""" + + def __init__(self, messages: Dict): + for message in messages: + if not ("role" in message and "content" in message): + raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.") + self.messages = messages + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True)) +class TextGenerationPipeline(Pipeline): + """ + Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a + specified text prompt. It can also accept one or more chats. Each chat takes the form of a list of dicts, + where each dict contains "role" and "content" keys. + + Example: + + ```python + >>> from transformers import pipeline + + >>> generator = pipeline(model="openai-community/gpt2") + >>> generator("I can't believe you did such a ", do_sample=False) + [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}] + + >>> # These parameters will return suggestions, and only the newly created text making it easier for prompting suggestions. + >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False) + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text + generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about + text generation parameters in [Text generation strategies](../generation_strategies) and [Text + generation](text_generation). + + This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier: + `"text-generation"`. + + The models that this pipeline can use are models that have been trained with an autoregressive language modeling + objective, which includes the uni-directional models in the library (e.g. openai-community/gpt2). See the list of available models + on [huggingface.co/models](https://huggingface.co/models?filter=text-generation). 
+ """ + + # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia + # in https://github.com/rusiaaman/XLNet-gen#methodology + # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e + + XL_PREFIX = """ + In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The + voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western + Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision + and denounces one of the men as a horse thief. Although his father initially slaps him for making such an + accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of + the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop, + begging for his blessing. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.check_model_type( + TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES + ) + if "prefix" not in self._preprocess_params: + # This is very specific. The logic is quite complex and needs to be done + # as a "default". + # It also defines both some preprocess_kwargs and generate_kwargs + # which is why we cannot put them in their respective methods. + prefix = None + if self.model.config.prefix is not None: + prefix = self.model.config.prefix + if prefix is None and self.model.__class__.__name__ in [ + "XLNetLMHeadModel", + "TransfoXLLMHeadModel", + "TFXLNetLMHeadModel", + "TFTransfoXLLMHeadModel", + ]: + # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. + prefix = self.XL_PREFIX + if prefix is not None: + # Recalculate some generate_kwargs linked to prefix. 
+ preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params) + self._preprocess_params = {**self._preprocess_params, **preprocess_params} + self._forward_params = {**self._forward_params, **forward_params} + + def _sanitize_parameters( + self, + return_full_text=None, + return_tensors=None, + return_text=None, + return_type=None, + clean_up_tokenization_spaces=None, + prefix=None, + handle_long_generation=None, + stop_sequence=None, + add_special_tokens=False, + truncation=None, + padding=False, + max_length=None, + **generate_kwargs, + ): + preprocess_params = { + "add_special_tokens": add_special_tokens, + "truncation": truncation, + "padding": padding, + "max_length": max_length, + } + if max_length is not None: + generate_kwargs["max_length"] = max_length + + if prefix is not None: + preprocess_params["prefix"] = prefix + if prefix: + prefix_inputs = self.tokenizer( + prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework + ) + generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1] + + if handle_long_generation is not None: + if handle_long_generation not in {"hole"}: + raise ValueError( + f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" + " [None, 'hole']" + ) + preprocess_params["handle_long_generation"] = handle_long_generation + + preprocess_params.update(generate_kwargs) + forward_params = generate_kwargs + + postprocess_params = {} + if return_full_text is not None and return_type is None: + if return_text is not None: + raise ValueError("`return_text` is mutually exclusive with `return_full_text`") + if return_tensors is not None: + raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`") + return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT + if return_tensors is not None and return_type is None: + if return_text is not None: + raise ValueError("`return_text` is mutually exclusive with `return_tensors`") + return_type = ReturnType.TENSORS + if return_type is not None: + postprocess_params["return_type"] = return_type + if clean_up_tokenization_spaces is not None: + postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces + + if stop_sequence is not None: + stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False) + if len(stop_sequence_ids) > 1: + warnings.warn( + "Stopping on a multiple token sequence is not yet supported on transformers. The first token of" + " the stop sequence will be used as the stop sequence string in the interim." + ) + generate_kwargs["eos_token_id"] = stop_sequence_ids[0] + + return preprocess_params, forward_params, postprocess_params + + # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments + def _parse_and_tokenize(self, *args, **kwargs): + """ + Parse arguments and tokenize + """ + # Parse arguments + if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: + kwargs.update({"add_space_before_punct_symbol": True}) + + return super()._parse_and_tokenize(*args, **kwargs) + + def __call__(self, text_inputs, **kwargs): + """ + Complete the prompt(s) given as inputs. + + Args: + text_inputs (`str` or `List[str]`): + One or several prompts (or one list of prompts) to complete. + return_tensors (`bool`, *optional*, defaults to `False`): + Whether or not to return the tensors of predictions (as token indices) in the outputs. 
If set to + `True`, the decoded text is not returned. + return_text (`bool`, *optional*, defaults to `True`): + Whether or not to return the decoded texts in the outputs. + return_full_text (`bool`, *optional*, defaults to `True`): + If set to `False` only added text is returned, otherwise the full text is returned. Only meaningful if + *return_text* is set to True. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`): + Whether or not to clean up the potential extra spaces in the text output. + prefix (`str`, *optional*): + Prefix added to prompt. + handle_long_generation (`str`, *optional*): + By default, this pipelines does not handle long generation (ones that exceed in one form or the other + the model maximum length). There is no perfect way to adress this (more info + :https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common + strategies to work around that problem depending on your use case. + + - `None` : default strategy where nothing in particular happens + - `"hole"`: Truncates left of input, and leaves a gap wide enough to let generation happen (might + truncate a lot of the prompt and not suitable when generation exceed the model capacity) + generate_kwargs (`dict`, *optional*): + Additional keyword arguments to pass along to the generate method of the model (see the generate method + corresponding to your framework [here](./model#generative-models)). + + Return: + A list or a list of list of `dict`: Returns one of the following dictionaries (cannot return a combination + of both `generated_text` and `generated_token_ids`): + + - **generated_text** (`str`, present when `return_text=True`) -- The generated text. + - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token + ids of the generated text. 
+ """ + if isinstance(text_inputs, (list, tuple)) and isinstance(text_inputs[0], (list, tuple, dict)): + # We have one or more prompts in list-of-dicts format, so this is chat mode + if isinstance(text_inputs[0], dict): + return super().__call__(Chat(text_inputs), **kwargs) + else: + chats = [Chat(chat) for chat in text_inputs] # 🐈 🐈 🐈 + return super().__call__(chats, **kwargs) + else: + return super().__call__(text_inputs, **kwargs) + + def preprocess( + self, + prompt_text, + prefix="", + handle_long_generation=None, + add_special_tokens=False, + truncation=None, + padding=False, + max_length=None, + **generate_kwargs, + ): + if isinstance(prompt_text, Chat): + inputs = self.tokenizer.apply_chat_template( + prompt_text.messages, + truncation=truncation, + padding=padding, + max_length=max_length, + add_generation_prompt=True, + return_dict=True, + return_tensors=self.framework, + ) + else: + inputs = self.tokenizer( + prefix + prompt_text, + truncation=truncation, + padding=padding, + max_length=max_length, + add_special_tokens=add_special_tokens, + return_tensors=self.framework, + ) + inputs["prompt_text"] = prompt_text + + if handle_long_generation == "hole": + cur_len = inputs["input_ids"].shape[-1] + if "max_new_tokens" in generate_kwargs: + new_tokens = generate_kwargs["max_new_tokens"] + else: + new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len + if new_tokens < 0: + raise ValueError("We cannot infer how many new tokens are expected") + if cur_len + new_tokens > self.tokenizer.model_max_length: + keep_length = self.tokenizer.model_max_length - new_tokens + if keep_length <= 0: + raise ValueError( + "We cannot use `hole` to handle this generation the number of desired tokens exceeds the" + " models max length" + ) + + inputs["input_ids"] = inputs["input_ids"][:, -keep_length:] + if "attention_mask" in inputs: + inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:] + + return inputs + + def _forward(self, model_inputs, **generate_kwargs): + input_ids = model_inputs["input_ids"] + attention_mask = model_inputs.get("attention_mask", None) + # Allow empty prompts + if input_ids.shape[1] == 0: + input_ids = None + attention_mask = None + in_b = 1 + else: + in_b = input_ids.shape[0] + prompt_text = model_inputs.pop("prompt_text") + + # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying + # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
+ prefix_length = generate_kwargs.pop("prefix_length", 0) + if prefix_length > 0: + has_max_new_tokens = "max_new_tokens" in generate_kwargs or ( + "generation_config" in generate_kwargs + and generate_kwargs["generation_config"].max_new_tokens is not None + ) + if not has_max_new_tokens: + generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length + generate_kwargs["max_length"] += prefix_length + has_min_new_tokens = "min_new_tokens" in generate_kwargs or ( + "generation_config" in generate_kwargs + and generate_kwargs["generation_config"].min_new_tokens is not None + ) + if not has_min_new_tokens and "min_length" in generate_kwargs: + generate_kwargs["min_length"] += prefix_length + + # BS x SL + generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs) + out_b = generated_sequence.shape[0] + if self.framework == "pt": + generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:]) + elif self.framework == "tf": + generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:])) + return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} + + def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True): + generated_sequence = model_outputs["generated_sequence"][0] + input_ids = model_outputs["input_ids"] + prompt_text = model_outputs["prompt_text"] + generated_sequence = generated_sequence.numpy().tolist() + records = [] + for sequence in generated_sequence: + if return_type == ReturnType.TENSORS: + record = {"generated_token_ids": sequence} + elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: + # Decode text + text = self.tokenizer.decode( + sequence, + skip_special_tokens=True, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + ) + + # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used + if input_ids is None: + prompt_length = 0 + else: + prompt_length = len( + self.tokenizer.decode( + input_ids[0], + skip_special_tokens=True, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + ) + ) + + all_text = text[prompt_length:] + if return_type == ReturnType.FULL_TEXT: + if isinstance(prompt_text, str): + all_text = prompt_text + all_text + elif isinstance(prompt_text, Chat): + all_text = prompt_text.messages + [{"role": "assistant", "content": all_text}] + + record = {"generated_text": all_text} + records.append(record) + + return records diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..9106b19d33671a959a5be0d834e48a8a3dc05010 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/pipelines/visual_question_answering.py @@ -0,0 +1,151 @@ +from typing import Union + +from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging +from .base import Pipeline, build_pipeline_init_args + + +if is_vision_available(): + from PIL import Image + + from ..image_utils import load_image + +if is_torch_available(): + from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES + +logger = logging.get_logger(__name__) + + +@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, 
has_image_processor=True)) +class VisualQuestionAnsweringPipeline(Pipeline): + """ + Visual Question Answering pipeline using a `AutoModelForVisualQuestionAnswering`. This pipeline is currently only + available in PyTorch. + + Example: + + ```python + >>> from transformers import pipeline + + >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa") + >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png" + >>> oracle(question="What is she wearing ?", image=image_url) + [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}] + + >>> oracle(question="What is she wearing ?", image=image_url, top_k=1) + [{'score': 0.948, 'answer': 'hat'}] + + >>> oracle(question="Is this a person ?", image=image_url, top_k=1) + [{'score': 0.993, 'answer': 'yes'}] + + >>> oracle(question="Is this a man ?", image=image_url, top_k=1) + [{'score': 0.996, 'answer': 'no'}] + ``` + + Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial) + + This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task + identifiers: `"visual-question-answering", "vqa"`. + + The models that this pipeline can use are models that have been fine-tuned on a visual question answering task. See + the up-to-date list of available models on + [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering). + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES) + + def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs): + preprocess_params, postprocess_params = {}, {} + if padding is not None: + preprocess_params["padding"] = padding + if truncation is not None: + preprocess_params["truncation"] = truncation + if timeout is not None: + preprocess_params["timeout"] = timeout + if top_k is not None: + postprocess_params["top_k"] = top_k + return preprocess_params, {}, postprocess_params + + def __call__(self, image: Union["Image.Image", str], question: str = None, **kwargs): + r""" + Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed + below: + + - `pipeline(image=image, question=question)` + - `pipeline({"image": image, "question": question})` + - `pipeline([{"image": image, "question": question}])` + - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])` + + Args: + image (`str`, `List[str]`, `PIL.Image` or `List[PIL.Image]`): + The pipeline handles three types of images: + + - A string containing a http link pointing to an image + - A string containing a local path to an image + - An image loaded in PIL directly + + The pipeline accepts either a single image or a batch of images. If given a single image, it can be + broadcasted to multiple questions. + question (`str`, `List[str]`): + The question(s) asked. If given a single question, it can be broadcasted to multiple images. + top_k (`int`, *optional*, defaults to 5): + The number of top labels that will be returned by the pipeline. If the provided number is higher than + the number of labels available in the model configuration, it will default to the number of labels. 
+ timeout (`float`, *optional*, defaults to None): + The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and + the call may block forever. + Return: + A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys: + + - **label** (`str`) -- The label identified by the model. + - **score** (`int`) -- The score attributed by the model for that label. + """ + if isinstance(image, (Image.Image, str)) and isinstance(question, str): + inputs = {"image": image, "question": question} + else: + """ + Supports the following format + - {"image": image, "question": question} + - [{"image": image, "question": question}] + - Generator and datasets + """ + inputs = image + results = super().__call__(inputs, **kwargs) + return results + + def preprocess(self, inputs, padding=False, truncation=False, timeout=None): + image = load_image(inputs["image"], timeout=timeout) + model_inputs = self.tokenizer( + inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation + ) + image_features = self.image_processor(images=image, return_tensors=self.framework) + model_inputs.update(image_features) + return model_inputs + + def _forward(self, model_inputs, **generate_kwargs): + if self.model.can_generate(): + model_outputs = self.model.generate(**model_inputs, **generate_kwargs) + else: + model_outputs = self.model(**model_inputs) + return model_outputs + + def postprocess(self, model_outputs, top_k=5): + if self.model.can_generate(): + return [ + {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()} + for output_ids in model_outputs + ] + else: + if top_k > self.model.config.num_labels: + top_k = self.model.config.num_labels + + if self.framework == "pt": + probs = model_outputs.logits.sigmoid()[0] + scores, ids = probs.topk(top_k) + else: + raise ValueError(f"Unsupported framework: {self.framework}") + + scores = scores.tolist() + ids = ids.tolist() + return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)] diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/__init__.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96adc7c53a9d478ad3e6586ad3868b2a519abf5d Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/__init__.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/agent_types.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/agent_types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..577447ed47bd7328440e48e2e667961b4a650d52 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/agent_types.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/agents.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/agents.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b0b2c9c81e0f87792da128af704a92aa78f8360 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/agents.cpython-310.pyc differ diff --git 
a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/evaluate_agent.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/evaluate_agent.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e23466585b8b835c004be2f72fc91b9ba1c5ecb2 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/evaluate_agent.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_captioning.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_captioning.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a37054edfad2f0356919c48507240d1db948aff Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_captioning.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_segmentation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_segmentation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bb1d0f43218efcd24db9152f1235b5c18232c5a Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/image_segmentation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/prompts.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/prompts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79f75d849a3ca243943714346b9f6534bd4dc19f Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/prompts.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/python_interpreter.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/python_interpreter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1950868fd1691782c31ef5b411c942c12d14257e Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/python_interpreter.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/speech_to_text.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/speech_to_text.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b39cb52faac58d925f49650165da95046640e35 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/speech_to_text.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_classification.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_classification.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b623a46bfd1b58b1f1aacfcd7185ae3a90f32693 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_classification.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_question_answering.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_question_answering.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..3709b0503c0a32def62743360bb5415c054e55ef Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_question_answering.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_summarization.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_summarization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7da2fc21b9c9ff7798e6b89e62cee5bea8d58735 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_summarization.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_to_speech.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_to_speech.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa98b54647a2e76a7185419474b93daa93089c39 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/text_to_speech.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/translation.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/translation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e30af8e002e3337253a5395fafa7cfdb9940aac6 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/transformers/tools/__pycache__/translation.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/transformers/tools/agents.py b/llmeval-env/lib/python3.10/site-packages/transformers/tools/agents.py new file mode 100644 index 0000000000000000000000000000000000000000..86adde93ef3001b056d1643a8d9fef6786864441 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/transformers/tools/agents.py @@ -0,0 +1,778 @@ +#!/usr/bin/env python +# coding=utf-8 + +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import importlib.util +import json +import os +import time +from dataclasses import dataclass +from typing import Dict + +import requests +from huggingface_hub import HfFolder, hf_hub_download, list_spaces + +from ..models.auto import AutoTokenizer +from ..utils import is_offline_mode, is_openai_available, is_torch_available, logging +from .base import TASK_MAPPING, TOOL_CONFIG_FILE, Tool, load_tool, supports_remote +from .prompts import CHAT_MESSAGE_PROMPT, download_prompt +from .python_interpreter import evaluate + + +logger = logging.get_logger(__name__) + + +if is_openai_available(): + import openai + +if is_torch_available(): + from ..generation import StoppingCriteria, StoppingCriteriaList + from ..models.auto import AutoModelForCausalLM +else: + StoppingCriteria = object + +_tools_are_initialized = False + + +BASE_PYTHON_TOOLS = { + "print": print, + "range": range, + "float": float, + "int": int, + "bool": bool, + "str": str, +} + + +@dataclass +class PreTool: + task: str + description: str + repo_id: str + + +HUGGINGFACE_DEFAULT_TOOLS = {} + + +HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB = [ + "image-transformation", + "text-download", + "text-to-image", + "text-to-video", +] + + +def get_remote_tools(organization="huggingface-tools"): + if is_offline_mode(): + logger.info("You are in offline mode, so remote tools are not available.") + return {} + + spaces = list_spaces(author=organization) + tools = {} + for space_info in spaces: + repo_id = space_info.id + resolved_config_file = hf_hub_download(repo_id, TOOL_CONFIG_FILE, repo_type="space") + with open(resolved_config_file, encoding="utf-8") as reader: + config = json.load(reader) + + task = repo_id.split("/")[-1] + tools[config["name"]] = PreTool(task=task, description=config["description"], repo_id=repo_id) + + return tools + + +def _setup_default_tools(): + global HUGGINGFACE_DEFAULT_TOOLS + global _tools_are_initialized + + if _tools_are_initialized: + return + + main_module = importlib.import_module("transformers") + tools_module = main_module.tools + + remote_tools = get_remote_tools() + for task_name, tool_class_name in TASK_MAPPING.items(): + tool_class = getattr(tools_module, tool_class_name) + description = tool_class.description + HUGGINGFACE_DEFAULT_TOOLS[tool_class.name] = PreTool(task=task_name, description=description, repo_id=None) + + if not is_offline_mode(): + for task_name in HUGGINGFACE_DEFAULT_TOOLS_FROM_HUB: + found = False + for tool_name, tool in remote_tools.items(): + if tool.task == task_name: + HUGGINGFACE_DEFAULT_TOOLS[tool_name] = tool + found = True + break + + if not found: + raise ValueError(f"{task_name} is not implemented on the Hub.") + + _tools_are_initialized = True + + +def resolve_tools(code, toolbox, remote=False, cached_tools=None): + if cached_tools is None: + resolved_tools = BASE_PYTHON_TOOLS.copy() + else: + resolved_tools = cached_tools + for name, tool in toolbox.items(): + if name not in code or name in resolved_tools: + continue + + if isinstance(tool, Tool): + resolved_tools[name] = tool + else: + task_or_repo_id = tool.task if tool.repo_id is None else tool.repo_id + _remote = remote and supports_remote(task_or_repo_id) + resolved_tools[name] = load_tool(task_or_repo_id, remote=_remote) + + return resolved_tools + + +def get_tool_creation_code(code, toolbox, remote=False): + code_lines = ["from transformers import load_tool", ""] + for name, tool in toolbox.items(): + if name not in code or isinstance(tool, Tool): + continue + + task_or_repo_id = tool.task if tool.repo_id is None else 
tool.repo_id + line = f'{name} = load_tool("{task_or_repo_id}"' + if remote: + line += ", remote=True" + line += ")" + code_lines.append(line) + + return "\n".join(code_lines) + "\n" + + +def clean_code_for_chat(result): + lines = result.split("\n") + idx = 0 + while idx < len(lines) and not lines[idx].lstrip().startswith("```"): + idx += 1 + explanation = "\n".join(lines[:idx]).strip() + if idx == len(lines): + return explanation, None + + idx += 1 + start_idx = idx + while not lines[idx].lstrip().startswith("```"): + idx += 1 + code = "\n".join(lines[start_idx:idx]).strip() + + return explanation, code + + +def clean_code_for_run(result): + result = f"I will use the following {result}" + explanation, code = result.split("Answer:") + explanation = explanation.strip() + code = code.strip() + + code_lines = code.split("\n") + if code_lines[0] in ["```", "```py", "```python"]: + code_lines = code_lines[1:] + if code_lines[-1] == "```": + code_lines = code_lines[:-1] + code = "\n".join(code_lines) + + return explanation, code + + +class Agent: + """ + Base class for all agents which contains the main API methods. + + Args: + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + """ + + def __init__(self, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): + _setup_default_tools() + + agent_name = self.__class__.__name__ + self.chat_prompt_template = download_prompt(chat_prompt_template, agent_name, mode="chat") + self.run_prompt_template = download_prompt(run_prompt_template, agent_name, mode="run") + self._toolbox = HUGGINGFACE_DEFAULT_TOOLS.copy() + self.log = print + if additional_tools is not None: + if isinstance(additional_tools, (list, tuple)): + additional_tools = {t.name: t for t in additional_tools} + elif not isinstance(additional_tools, dict): + additional_tools = {additional_tools.name: additional_tools} + + replacements = {name: tool for name, tool in additional_tools.items() if name in HUGGINGFACE_DEFAULT_TOOLS} + self._toolbox.update(additional_tools) + if len(replacements) > 1: + names = "\n".join([f"- {n}: {t}" for n, t in replacements.items()]) + logger.warning( + f"The following tools have been replaced by the ones provided in `additional_tools`:\n{names}." 
+                )
+        elif len(replacements) == 1:
+            name = list(replacements.keys())[0]
+            logger.warning(f"{name} has been replaced by {replacements[name]} as provided in `additional_tools`.")
+
+        self.prepare_for_new_chat()
+
+    @property
+    def toolbox(self) -> Dict[str, Tool]:
+        """Get all tools currently available to the agent"""
+        return self._toolbox
+
+    def format_prompt(self, task, chat_mode=False):
+        description = "\n".join([f"- {name}: {tool.description}" for name, tool in self.toolbox.items()])
+        if chat_mode:
+            if self.chat_history is None:
+                prompt = self.chat_prompt_template.replace("<<all_tools>>", description)
+            else:
+                prompt = self.chat_history
+            prompt += CHAT_MESSAGE_PROMPT.replace("<<task>>", task)
+        else:
+            prompt = self.run_prompt_template.replace("<<all_tools>>", description)
+            prompt = prompt.replace("<<prompt>>", task)
+        return prompt
+
+    def set_stream(self, streamer):
+        """
+        Set the function used to stream results (which is `print` by default).
+
+        Args:
+            streamer (`callable`): The function to call when streaming results from the LLM.
+        """
+        self.log = streamer
+
+    def chat(self, task, *, return_code=False, remote=False, **kwargs):
+        """
+        Sends a new request to the agent in a chat. Will use the previous ones in its history.
+
+        Args:
+            task (`str`): The task to perform
+            return_code (`bool`, *optional*, defaults to `False`):
+                Whether to just return code and not evaluate it.
+            remote (`bool`, *optional*, defaults to `False`):
+                Whether or not to use remote tools (inference endpoints) instead of local ones.
+            kwargs (additional keyword arguments, *optional*):
+                Any keyword argument to send to the agent when evaluating the code.
+
+        Example:
+
+        ```py
+        from transformers import HfAgent
+
+        agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
+        agent.chat("Draw me a picture of rivers and lakes")
+
+        agent.chat("Transform the picture so that there is a rock in there")
+        ```
+        """
+        prompt = self.format_prompt(task, chat_mode=True)
+        result = self.generate_one(prompt, stop=["Human:", "====="])
+        self.chat_history = prompt + result.strip() + "\n"
+        explanation, code = clean_code_for_chat(result)
+
+        self.log(f"==Explanation from the agent==\n{explanation}")
+
+        if code is not None:
+            self.log(f"\n\n==Code generated by the agent==\n{code}")
+            if not return_code:
+                self.log("\n\n==Result==")
+                self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools)
+                self.chat_state.update(kwargs)
+                return evaluate(code, self.cached_tools, self.chat_state, chat_mode=True)
+            else:
+                tool_code = get_tool_creation_code(code, self.toolbox, remote=remote)
+                return f"{tool_code}\n{code}"
+
+    def prepare_for_new_chat(self):
+        """
+        Clears the history of prior calls to [`~Agent.chat`].
+        """
+        self.chat_history = None
+        self.chat_state = {}
+        self.cached_tools = None
+
+    def clean_code_for_run(self, result):
+        """
+        Override this method if you want to change the way the code is
+        cleaned for the `run` method.
+        """
+        return clean_code_for_run(result)
+
+    def run(self, task, *, return_code=False, remote=False, **kwargs):
+        """
+        Sends a request to the agent.
+
+        Args:
+            task (`str`): The task to perform
+            return_code (`bool`, *optional*, defaults to `False`):
+                Whether to just return code and not evaluate it.
+            remote (`bool`, *optional*, defaults to `False`):
+                Whether or not to use remote tools (inference endpoints) instead of local ones.
+            kwargs (additional keyword arguments, *optional*):
+                Any keyword argument to send to the agent when evaluating the code.
+ + Example: + + ```py + from transformers import HfAgent + + agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder") + agent.run("Draw me a picture of rivers and lakes") + ``` + """ + prompt = self.format_prompt(task) + result = self.generate_one(prompt, stop=["Task:"]) + explanation, code = self.clean_code_for_run(result) + + self.log(f"==Explanation from the agent==\n{explanation}") + + self.log(f"\n\n==Code generated by the agent==\n{code}") + if not return_code: + self.log("\n\n==Result==") + self.cached_tools = resolve_tools(code, self.toolbox, remote=remote, cached_tools=self.cached_tools) + return evaluate(code, self.cached_tools, state=kwargs.copy()) + else: + tool_code = get_tool_creation_code(code, self.toolbox, remote=remote) + return f"{tool_code}\n{code}" + + def generate_one(self, prompt, stop): + # This is the method to implement in your custom agent. + raise NotImplementedError + + def generate_many(self, prompts, stop): + # Override if you have a way to do batch generation faster than one by one + return [self.generate_one(prompt, stop) for prompt in prompts] + + +class OpenAiAgent(Agent): + """ + Agent that uses the openai API to generate code. + + + + The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like + `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. + + + + Args: + model (`str`, *optional*, defaults to `"text-davinci-003"`): + The name of the OpenAI model to use. + api_key (`str`, *optional*): + The API key to use. If unset, will look for the environment variable `"OPENAI_API_KEY"`. + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. + + Example: + + ```py + from transformers import OpenAiAgent + + agent = OpenAiAgent(model="text-davinci-003", api_key=xxx) + agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") + ``` + """ + + def __init__( + self, + model="text-davinci-003", + api_key=None, + chat_prompt_template=None, + run_prompt_template=None, + additional_tools=None, + ): + if not is_openai_available(): + raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") + + if api_key is None: + api_key = os.environ.get("OPENAI_API_KEY", None) + if api_key is None: + raise ValueError( + "You need an openai key to use `OpenAIAgent`. You can get one here: Get one here " + "https://openai.com/api/`. If you have one, set it in your env with `os.environ['OPENAI_API_KEY'] = " + "xxx." 
+ ) + else: + openai.api_key = api_key + self.model = model + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + def generate_many(self, prompts, stop): + if "gpt" in self.model: + return [self._chat_generate(prompt, stop) for prompt in prompts] + else: + return self._completion_generate(prompts, stop) + + def generate_one(self, prompt, stop): + if "gpt" in self.model: + return self._chat_generate(prompt, stop) + else: + return self._completion_generate([prompt], stop)[0] + + def _chat_generate(self, prompt, stop): + result = openai.chat.completions.create( + model=self.model, + messages=[{"role": "user", "content": prompt}], + temperature=0, + stop=stop, + ) + return result.choices[0].message.content + + def _completion_generate(self, prompts, stop): + result = openai.Completion.create( + model=self.model, + prompt=prompts, + temperature=0, + stop=stop, + max_tokens=200, + ) + return [answer["text"] for answer in result["choices"]] + + +class AzureOpenAiAgent(Agent): + """ + Agent that uses Azure OpenAI to generate code. See the [official + documentation](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/) to learn how to deploy an openAI + model on Azure + + + + The openAI models are used in generation mode, so even for the `chat()` API, it's better to use models like + `"text-davinci-003"` over the chat-GPT variant. Proper support for chat-GPT models will come in a next version. + + + + Args: + deployment_id (`str`): + The name of the deployed Azure openAI model to use. + api_key (`str`, *optional*): + The API key to use. If unset, will look for the environment variable `"AZURE_OPENAI_API_KEY"`. + resource_name (`str`, *optional*): + The name of your Azure OpenAI Resource. If unset, will look for the environment variable + `"AZURE_OPENAI_RESOURCE_NAME"`. + api_version (`str`, *optional*, default to `"2022-12-01"`): + The API version to use for this agent. + is_chat_mode (`bool`, *optional*): + Whether you are using a completion model or a chat model (see note above, chat models won't be as + efficient). Will default to `gpt` being in the `deployment_id` or not. + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. + additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*): + Any additional tools to include on top of the default ones. If you pass along a tool with the same name as + one of the default tools, that default tool will be overridden. 
+ + Example: + + ```py + from transformers import AzureOpenAiAgent + + agent = AzureAiAgent(deployment_id="Davinci-003", api_key=xxx, resource_name=yyy) + agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!") + ``` + """ + + def __init__( + self, + deployment_id, + api_key=None, + resource_name=None, + api_version="2022-12-01", + is_chat_model=None, + chat_prompt_template=None, + run_prompt_template=None, + additional_tools=None, + ): + if not is_openai_available(): + raise ImportError("Using `OpenAiAgent` requires `openai`: `pip install openai`.") + + self.deployment_id = deployment_id + openai.api_type = "azure" + if api_key is None: + api_key = os.environ.get("AZURE_OPENAI_API_KEY", None) + if api_key is None: + raise ValueError( + "You need an Azure openAI key to use `AzureOpenAIAgent`. If you have one, set it in your env with " + "`os.environ['AZURE_OPENAI_API_KEY'] = xxx." + ) + else: + openai.api_key = api_key + if resource_name is None: + resource_name = os.environ.get("AZURE_OPENAI_RESOURCE_NAME", None) + if resource_name is None: + raise ValueError( + "You need a resource_name to use `AzureOpenAIAgent`. If you have one, set it in your env with " + "`os.environ['AZURE_OPENAI_RESOURCE_NAME'] = xxx." + ) + else: + openai.api_base = f"https://{resource_name}.openai.azure.com" + openai.api_version = api_version + + if is_chat_model is None: + is_chat_model = "gpt" in deployment_id.lower() + self.is_chat_model = is_chat_model + + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + def generate_many(self, prompts, stop): + if self.is_chat_model: + return [self._chat_generate(prompt, stop) for prompt in prompts] + else: + return self._completion_generate(prompts, stop) + + def generate_one(self, prompt, stop): + if self.is_chat_model: + return self._chat_generate(prompt, stop) + else: + return self._completion_generate([prompt], stop)[0] + + def _chat_generate(self, prompt, stop): + result = openai.ChatCompletion.create( + engine=self.deployment_id, + messages=[{"role": "user", "content": prompt}], + temperature=0, + stop=stop, + ) + return result["choices"][0]["message"]["content"] + + def _completion_generate(self, prompts, stop): + result = openai.Completion.create( + engine=self.deployment_id, + prompt=prompts, + temperature=0, + stop=stop, + max_tokens=200, + ) + return [answer["text"] for answer in result["choices"]] + + +class HfAgent(Agent): + """ + Agent that uses an inference endpoint to generate code. + + Args: + url_endpoint (`str`): + The name of the url endpoint to use. + token (`str`, *optional*): + The token to use as HTTP bearer authorization for remote files. If unset, will use the token generated when + running `huggingface-cli login` (stored in `~/.huggingface`). + chat_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `chat` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `chat_prompt_template.txt` in this repo in this case. + run_prompt_template (`str`, *optional*): + Pass along your own prompt if you want to override the default template for the `run` method. Can be the + actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named + `run_prompt_template.txt` in this repo in this case. 
+        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
+            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
+            one of the default tools, that default tool will be overridden.
+
+    Example:
+
+    ```py
+    from transformers import HfAgent
+
+    agent = HfAgent("https://api-inference.huggingface.co/models/bigcode/starcoder")
+    agent.run("Is the following `text` (in Spanish) positive or negative?", text="¡Este es un API muy agradable!")
+    ```
+    """
+
+    def __init__(
+        self, url_endpoint, token=None, chat_prompt_template=None, run_prompt_template=None, additional_tools=None
+    ):
+        self.url_endpoint = url_endpoint
+        if token is None:
+            self.token = f"Bearer {HfFolder().get_token()}"
+        elif token.startswith("Bearer") or token.startswith("Basic"):
+            self.token = token
+        else:
+            self.token = f"Bearer {token}"
+        super().__init__(
+            chat_prompt_template=chat_prompt_template,
+            run_prompt_template=run_prompt_template,
+            additional_tools=additional_tools,
+        )
+
+    def generate_one(self, prompt, stop):
+        headers = {"Authorization": self.token}
+        inputs = {
+            "inputs": prompt,
+            "parameters": {"max_new_tokens": 200, "return_full_text": False, "stop": stop},
+        }
+
+        response = requests.post(self.url_endpoint, json=inputs, headers=headers)
+        if response.status_code == 429:
+            logger.info("Getting rate-limited, waiting a tiny bit before trying again.")
+            time.sleep(1)
+            # Retry the same request once the rate limit has had a moment to reset.
+            return self.generate_one(prompt, stop)
+        elif response.status_code != 200:
+            raise ValueError(f"Error {response.status_code}: {response.json()}")
+
+        result = response.json()[0]["generated_text"]
+        # Inference API returns the stop sequence
+        for stop_seq in stop:
+            if result.endswith(stop_seq):
+                return result[: -len(stop_seq)]
+        return result
+
+
+class LocalAgent(Agent):
+    """
+    Agent that uses a local model and tokenizer to generate code.
+
+    Args:
+        model ([`PreTrainedModel`]):
+            The model to use for the agent.
+        tokenizer ([`PreTrainedTokenizer`]):
+            The tokenizer to use for the agent.
+        chat_prompt_template (`str`, *optional*):
+            Pass along your own prompt if you want to override the default template for the `chat` method. Can be the
+            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
+            `chat_prompt_template.txt` in this repo in this case.
+        run_prompt_template (`str`, *optional*):
+            Pass along your own prompt if you want to override the default template for the `run` method. Can be the
+            actual prompt template or a repo ID (on the Hugging Face Hub). The prompt should be in a file named
+            `run_prompt_template.txt` in this repo in this case.
+        additional_tools ([`Tool`], list of tools or dictionary with tool values, *optional*):
+            Any additional tools to include on top of the default ones. If you pass along a tool with the same name as
+            one of the default tools, that default tool will be overridden.
+ + Example: + + ```py + import torch + from transformers import AutoModelForCausalLM, AutoTokenizer, LocalAgent + + checkpoint = "bigcode/starcoder" + model = AutoModelForCausalLM.from_pretrained(checkpoint, device_map="auto", torch_dtype=torch.bfloat16) + tokenizer = AutoTokenizer.from_pretrained(checkpoint) + + agent = LocalAgent(model, tokenizer) + agent.run("Draw me a picture of rivers and lakes.") + ``` + """ + + def __init__(self, model, tokenizer, chat_prompt_template=None, run_prompt_template=None, additional_tools=None): + self.model = model + self.tokenizer = tokenizer + super().__init__( + chat_prompt_template=chat_prompt_template, + run_prompt_template=run_prompt_template, + additional_tools=additional_tools, + ) + + @classmethod + def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): + """ + Convenience method to build a `LocalAgent` from a pretrained checkpoint. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + The name of a repo on the Hub or a local path to a folder containing both model and tokenizer. + kwargs (`Dict[str, Any]`, *optional*): + Keyword arguments passed along to [`~PreTrainedModel.from_pretrained`]. + + Example: + + ```py + import torch + from transformers import LocalAgent + + agent = LocalAgent.from_pretrained("bigcode/starcoder", device_map="auto", torch_dtype=torch.bfloat16) + agent.run("Draw me a picture of rivers and lakes.") + ``` + """ + model = AutoModelForCausalLM.from_pretrained(pretrained_model_name_or_path, **kwargs) + tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs) + return cls(model, tokenizer) + + @property + def _model_device(self): + if hasattr(self.model, "hf_device_map"): + return list(self.model.hf_device_map.values())[0] + for param in self.model.parameters(): + return param.device + + def generate_one(self, prompt, stop): + encoded_inputs = self.tokenizer(prompt, return_tensors="pt").to(self._model_device) + src_len = encoded_inputs["input_ids"].shape[1] + stopping_criteria = StoppingCriteriaList([StopSequenceCriteria(stop, self.tokenizer)]) + outputs = self.model.generate( + encoded_inputs["input_ids"], max_new_tokens=200, stopping_criteria=stopping_criteria + ) + + result = self.tokenizer.decode(outputs[0].tolist()[src_len:]) + # Inference API returns the stop sequence + for stop_seq in stop: + if result.endswith(stop_seq): + result = result[: -len(stop_seq)] + return result + + +class StopSequenceCriteria(StoppingCriteria): + """ + This class can be used to stop generation whenever a sequence of tokens is encountered. + + Args: + stop_sequences (`str` or `List[str]`): + The sequence (or list of sequences) on which to stop execution. + tokenizer: + The tokenizer used to decode the model outputs. + """ + + def __init__(self, stop_sequences, tokenizer): + if isinstance(stop_sequences, str): + stop_sequences = [stop_sequences] + self.stop_sequences = stop_sequences + self.tokenizer = tokenizer + + def __call__(self, input_ids, scores, **kwargs) -> bool: + decoded_output = self.tokenizer.decode(input_ids.tolist()[0]) + return any(decoded_output.endswith(stop_sequence) for stop_sequence in self.stop_sequences)
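As a quick illustration of how `StopSequenceCriteria` from the file above is meant to be wired up, mirroring what `LocalAgent.generate_one` does internally, here is a minimal sketch. The `sshleifer/tiny-gpt2` checkpoint is an assumption used purely for demonstration; any causal language model checkpoint works.

```python
# Minimal sketch: stop generation as soon as the decoded text ends with a given sequence,
# the same mechanism LocalAgent.generate_one uses internally.
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteriaList
from transformers.tools.agents import StopSequenceCriteria

checkpoint = "sshleifer/tiny-gpt2"  # assumed small causal LM, for demonstration only
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint)

prompt = "Task: Draw me a picture of rivers and lakes.\nAnswer:"
encoded = tokenizer(prompt, return_tensors="pt")

# Stop whenever the decoded output ends with "Task:", the same stop sequence Agent.run uses.
stopping_criteria = StoppingCriteriaList([StopSequenceCriteria("Task:", tokenizer)])
outputs = model.generate(
    encoded["input_ids"], max_new_tokens=50, stopping_criteria=stopping_criteria
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```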