diff --git a/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/fp32.pt b/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..2515367869fda2ea1ce92e7084add0ae3a8fb064
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8372e8f5a716ef6fd653031d370765ed4ac4f06eaf8cc2391d39b84c5df2da3c
+size 33555533
diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..a751332bc6fbae7b680f4412609dcf0695eb972c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-loglikelihood
@@ -0,0 +1 @@
+49edb1e735660631ea6cc309721e6c0b80b7106a613a6959514852ca48f1130e
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..f808af460570411d4616b6187dd67fa2ddd6ecee
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood
@@ -0,0 +1 @@
+7fab9f02e71a224ae7931aa77f8a9a61d887a7480756adc965d4746e97fb04a5
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..d765bb590653a5c4eb3e2517f9b3788cdefc7fa5
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood
@@ -0,0 +1 @@
+007c47e5fbf88119c5180feef75e1345d448e56adcd4c7ab2d52fb8d67350d34
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..da3deb1aaf576e90101d03035ae3f9f41b80fd27
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_existential_there_object_raising": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_object_raising": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..7697713f85bef6fd2d624f5b9075aae5bfd8f168
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood
@@ -0,0 +1 @@
+d77594382e6d9af31a8b8ef00ba1ef6c29d6be6d0ddb7a9c27ef25ace654e05a
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..5a4dd092c4a82b59d702c027e16c684c634649e1
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_passive_2-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_passive_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_passive_2": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a7f8f1825ac91b69d8ba1a50a5f87f048aeb3f78
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_sentential_subject_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_subject_island": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..c3e6af12f2da0a1857c0f0456bf4052d5558329e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_object_gap-v0-loglikelihood
@@ -0,0 +1 @@
+4d4aaa0274ccd485ff8430ed61b8f83806febe18c16616c7d050f637a0463eba
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..14befd4ab6450dbb2147d66e5458981756bfc25b
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_wh_vs_that_with_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_with_gap": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..34b959139635a241b0fe814ce2ae7240c32a7c1c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood
@@ -0,0 +1 @@
+eed67491bdf493a1dad8f1d9766bc7bd0e79946365b833c0f7eb81ac998e3dca
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/cb-v1-res.json b/lm-evaluation-harness/tests/testdata/cb-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..1cff410b2c35a16b457d163d95ac7cbd8eb704e2
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/cb-v1-res.json
@@ -0,0 +1 @@
+{"results": {"cb": {"acc": 0.3392857142857143, "acc_stderr": 0.06384226561930825, "f1": 0.2819143819143819}}, "versions": {"cb": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..dcc5116204283941b74dfea97e3a1ce5edd9dc27
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-business_ethics-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-business_ethics": {"acc": 0.29, "acc_norm": 0.27, "acc_norm_stderr": 0.044619604333847394, "acc_stderr": 0.045604802157206845}}, "versions": {"hendrycksTest-business_ethics": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..42b781149bff323130b4491463168f03bdfbb9a9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_psychology": {"acc": 0.24587155963302754, "acc_norm": 0.23302752293577983, "acc_norm_stderr": 0.018125669180861493, "acc_stderr": 0.018461940968708436}}, "versions": {"hendrycksTest-high_school_psychology": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..1f15d0be56b5edf18ad7cc2bec4977fae99f060b
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_standard-v0-res.json
@@ -0,0 +1 @@
+{"results": {"lambada_standard": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_standard": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json b/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..240f7b6b42b77b8e94c1ec2eab2df808181a2cb3
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json
@@ -0,0 +1 @@
+{"results": {"math_counting_and_prob": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_counting_and_prob": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/mutual-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/mutual-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..0022f466d25f3e3a639720e4600732c9c0c1141d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/mutual-v0-loglikelihood
@@ -0,0 +1 @@
+f759213a28f0412510bf1a24c9cab0dae64bdee902d42a26225295445e7779db
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/mutual-v0-res.json b/lm-evaluation-harness/tests/testdata/mutual-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..2d240576b3b8d891ff91a47770df9990edf34105
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/mutual-v0-res.json
@@ -0,0 +1 @@
+{"results": {"mutual": {"mrr": 0.5023513920240772, "mrr_stderr": 0.009501864812936679, "r@1": 0.22573363431151242, "r@1_stderr": 0.014053085820407457, "r@2": 0.4221218961625282, "r@2_stderr": 0.016602191705517556}}, "versions": {"mutual": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/openbookqa-v0-res.json b/lm-evaluation-harness/tests/testdata/openbookqa-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..04f4c25442e678a63d3f6213dc9364bfa25b1a7a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/openbookqa-v0-res.json
@@ -0,0 +1 @@
+{"results": {"openbookqa": {"acc": 0.214, "acc_norm": 0.276, "acc_norm_stderr": 0.020011219298073517, "acc_stderr": 0.018359797502387046}}, "versions": {"openbookqa": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_books3-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_books3-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..6ff7a517112eba76e15e999e9974124e04f07a83
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_books3-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_books3": {"bits_per_byte": 1.2901280503011222e-06, "byte_perplexity": 1.0000008942490204, "word_perplexity": 1.0000052870063607}}, "versions": {"pile_books3": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_enron-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_enron-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a4a49493d56db35c99b7e58ea66ebc21304184b2
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_enron-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_enron": {"bits_per_byte": 0.0003163902828673244, "byte_perplexity": 1.000316440339552, "word_perplexity": 1.00224668051869}}, "versions": {"pile_enron": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_enron-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_enron-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..abe7b45f9aff9b6427068ceb1ba39977fa843c38
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_enron-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_enron": {"bits_per_byte": 0.0004564546920781453, "byte_perplexity": 1.000316440339552, "word_perplexity": 1.00224668051869}}, "versions": {"pile_enron": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_github-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_github-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..cf8251e4f68e2e893624142031e80d4d5777f4f2
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_github-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+df384c3df3d8f53273e97127c5bb84c17e638acad7d6bc9c91f6dee96d43b639
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..47805d3b5fe82555e4d61a90b43c157c974ddabc
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-loglikelihood_rolling
@@ -0,0 +1 @@
+0f1c23a1f4ddec0c2b1ff34de8d1505b0eb9e2868d8edbcc1b6de13d02f32036
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..f718e515ba0cedfa5156b3a260d50ed55efc32e4
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_opensubtitles-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_opensubtitles": {"bits_per_byte": 1.5213441136639177e-05, "byte_perplexity": 1.0000152135568616, "word_perplexity": 1.0000856162053249}}, "versions": {"pile_opensubtitles": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_philpapers-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_philpapers-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..be561fe2f8a6fe5eba08c4c1efd113075da42e1f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_philpapers-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_philpapers": {"bits_per_byte": 6.241575895982095e-06, "byte_perplexity": 1.0000062415953748, "word_perplexity": 1.0000409888564146}}, "versions": {"pile_philpapers": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/random_insertion-v0-res.json b/lm-evaluation-harness/tests/testdata/random_insertion-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..9b5f507f6745120414ba5cfd39fc92eac4e48424
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/random_insertion-v0-res.json
@@ -0,0 +1 @@
+{"results": {"random_insertion": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"random_insertion": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/triviaqa-v1-res.json b/lm-evaluation-harness/tests/testdata/triviaqa-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..985f64c8e0eb3bc1dd563becf0cdf186baa172cd
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/triviaqa-v1-res.json
@@ -0,0 +1 @@
+{"results": {"triviaqa": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"triviaqa": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-res.json b/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..5e68fa8dc6ace5fd91322aacdc74de3814832d9a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-res.json
@@ -0,0 +1 @@
+{"results": {"truthfulqa_gen": {"bleu_acc": 0.0, "bleu_acc_stderr": 0.0, "bleu_diff": 0.0, "bleu_diff_stderr": 0.0, "bleu_max": 0.0, "bleu_max_stderr": 0.0, "bleurt_acc": 0.8372093023255814, "bleurt_acc_stderr": 0.012923696051772253, "bleurt_diff": 0.13967358205134603, "bleurt_diff_stderr": 0.00532907098769571, "bleurt_max": -1.4402793981454072, "bleurt_max_stderr": 0.0021884846359458963, "rouge1_acc": 0.0, "rouge1_acc_stderr": 0.0, "rouge1_diff": 0.0, "rouge1_diff_stderr": 0.0, "rouge1_max": 0.0, "rouge1_max_stderr": 0.0, "rouge2_acc": 0.0, "rouge2_acc_stderr": 0.0, "rouge2_diff": 0.0, "rouge2_diff_stderr": 0.0, "rouge2_max": 0.0, "rouge2_max_stderr": 0.0, "rougeL_acc": 0.0, "rougeL_acc_stderr": 0.0, "rougeL_diff": 0.0, "rougeL_diff_stderr": 0.0, "rougeL_max": 0.0, "rougeL_max_stderr": 0.0}}, "versions": {"truthfulqa_gen": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-loglikelihood b/lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..4bab2d1f4df241fe0cf47f22bf185d52f9b783ef
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-loglikelihood
@@ -0,0 +1 @@
+1e07020e9cf41d46ed65312eb39d2b8e6599673d4f0d6b67c0d0eba0efb493bb
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/winogrande-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/winogrande-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..97866f6ce45cb9a213d27310a78b7cdeab23bc9a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/winogrande-v0-loglikelihood
@@ -0,0 +1 @@
+90a3eff49de9173964d46f5ed57bcf9a78a72dd1bfe0e5323b25cebb40b49ea9
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wsc273-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/wsc273-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..9d592917bd96783c290237446891eef56083a693
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wsc273-v0-loglikelihood
@@ -0,0 +1 @@
+26450d414c4581feb51a09882080e7a9b95882e7eab47b1751a4a6024b5a60ee
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png b/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png
new file mode 100644
index 0000000000000000000000000000000000000000..07cac5b54f8a39774c151fc70a00552ba83fe5fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/sympy/plotting/tests/test_region_and.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:115d0b9b81ed40f93fe9e216b4f6384cf71093e3bbb64a5d648b8b9858c645a0
+size 6864
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e3885d381f9c26e34c08af326364bf8309e1be98
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__init__.py
@@ -0,0 +1,67 @@
+# Copyright 2021 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_sentencepiece_available,
+ is_tokenizers_available,
+ is_torch_available,
+ is_vision_available,
+)
+
+
+_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
+
+if TYPE_CHECKING:
+ from .processing_layoutxlm import LayoutXLMProcessor
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1815ea9ee3e0395ea59a47c14c697576bae305ed
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..186192c42ea16afcbf65cbe24fe9a55cf0e59752
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/processing_layoutxlm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6c441688d63e7c4d294172d663f1c5eabd14de43
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..eb5d5a197e485eb056263ab14b65e250a7f2557e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/__pycache__/tokenization_layoutxlm_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1d885255b7cc846acaf59af31e8cec8544bd2ae
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/processing_layoutxlm.py
@@ -0,0 +1,200 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for LayoutXLM.
+"""
+import warnings
+from typing import List, Optional, Union
+
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
+from ...utils import TensorType
+
+
+class LayoutXLMProcessor(ProcessorMixin):
+ r"""
+ Constructs a LayoutXLM processor which combines a LayoutXLM image processor and a LayoutXLM tokenizer into a single
+ processor.
+
+ [`LayoutXLMProcessor`] offers all the functionalities you need to prepare data for the model.
+
+ It first uses [`LayoutLMv2ImageProcessor`] to resize document images to a fixed size, and optionally applies OCR to
+ get words and normalized bounding boxes. These are then provided to [`LayoutXLMTokenizer`] or
+ [`LayoutXLMTokenizerFast`], which turns the words and bounding boxes into token-level `input_ids`,
+ `attention_mask`, `token_type_ids`, `bbox`. Optionally, one can provide integer `word_labels`, which are turned
+ into token-level `labels` for token classification tasks (such as FUNSD, CORD).
+
+ Args:
+ image_processor (`LayoutLMv2ImageProcessor`, *optional*):
+ An instance of [`LayoutLMv2ImageProcessor`]. The image processor is a required input.
+ tokenizer (`LayoutXLMTokenizer` or `LayoutXLMTokenizerFast`, *optional*):
+ An instance of [`LayoutXLMTokenizer`] or [`LayoutXLMTokenizerFast`]. The tokenizer is a required input.
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "LayoutLMv2ImageProcessor"
+ tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
+
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
+ if "feature_extractor" in kwargs:
+ warnings.warn(
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
+ " instead.",
+ FutureWarning,
+ )
+ feature_extractor = kwargs.pop("feature_extractor")
+
+ image_processor = image_processor if image_processor is not None else feature_extractor
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+
+ def __call__(
+ self,
+ images,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ This method first forwards the `images` argument to [`~LayoutLMv2ImageProcessor.__call__`]. In case
+ [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to `True`, it passes the obtained words and
+ bounding boxes along with the additional arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output,
+ together with resized `images`. In case [`LayoutLMv2ImageProcessor`] was initialized with `apply_ocr` set to
+ `False`, it passes the words (`text`/`text_pair`) and `boxes` specified by the user along with the additional
+ arguments to [`~LayoutXLMTokenizer.__call__`] and returns the output, together with resized `images`.
+
+ Please refer to the docstring of the above two methods for more information.
+ """
+ # verify input
+ if self.image_processor.apply_ocr and (boxes is not None):
+ raise ValueError(
+ "You cannot provide bounding boxes "
+ "if you initialized the image processor with apply_ocr set to True."
+ )
+
+ if self.image_processor.apply_ocr and (word_labels is not None):
+ raise ValueError(
+ "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
+ )
+
+ if return_overflowing_tokens is True and return_offsets_mapping is False:
+ raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
+
+ # first, apply the image processor
+ features = self.image_processor(images=images, return_tensors=return_tensors)
+
+ # second, apply the tokenizer
+ if text is not None and self.image_processor.apply_ocr and text_pair is None:
+ if isinstance(text, str):
+ text = [text] # add batch dimension (as the image processor always adds a batch dimension)
+ text_pair = features["words"]
+
+ encoded_inputs = self.tokenizer(
+ text=text if text is not None else features["words"],
+ text_pair=text_pair if text_pair is not None else None,
+ boxes=boxes if boxes is not None else features["boxes"],
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ # add pixel values
+ images = features.pop("pixel_values")
+ if return_overflowing_tokens is True:
+ images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
+ encoded_inputs["image"] = images
+
+ return encoded_inputs
+
+ def get_overflowing_images(self, images, overflow_to_sample_mapping):
+ # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
+ images_with_overflow = []
+ for sample_idx in overflow_to_sample_mapping:
+ images_with_overflow.append(images[sample_idx])
+
+ if len(images_with_overflow) != len(overflow_to_sample_mapping):
+ raise ValueError(
+ "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
+ f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
+ )
+
+ return images_with_overflow
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to PreTrainedTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer
+ to the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ return ["input_ids", "bbox", "attention_mask", "image"]
+
+ @property
+ def feature_extractor_class(self):
+ warnings.warn(
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
+ FutureWarning,
+ )
+ return self.image_processor_class
+
+ @property
+ def feature_extractor(self):
+ warnings.warn(
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
+ FutureWarning,
+ )
+ return self.image_processor
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py
new file mode 100644
index 0000000000000000000000000000000000000000..bbfdf44a1e60208f1b7376bbfc53a839397c75df
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm.py
@@ -0,0 +1,1170 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for LayoutXLM model."""
+
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...tokenization_utils_base import (
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...utils import PaddingStrategy, TensorType, add_end_docstrings, logging
+from ..xlm_roberta.tokenization_xlm_roberta import (
+ SPIECE_UNDERLINE,
+ VOCAB_FILES_NAMES,
+)
+
+
+logger = logging.get_logger(__name__)
+
+
+LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters.
+
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ return_token_type_ids (`bool`, *optional*):
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
+ of returning overflowing tokens.
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to return special tokens mask information.
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
+ Whether or not to return `(char_start, char_end)` for each token.
+
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
+ Python's tokenizer, this method will raise `NotImplementedError`.
+ return_length (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the lengths of the encoded inputs.
+ verbose (`bool`, *optional*, defaults to `True`):
+ Whether or not to print more information and warnings.
+ **kwargs: passed to the `self.tokenize()` method
+
+ Return:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **bbox** -- List of bounding boxes to be fed to a model.
+
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
+ if *"token_type_ids"* is in `self.model_input_names`).
+
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
+ - **length** -- The length of the inputs (when `return_length=True`).
+"""
+
+
+class LayoutXLMTokenizer(PreTrainedTokenizer):
+ """
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [CLS] token.
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
+ The bounding box to use for the special [SEP] token.
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [PAD] token.
+ pad_token_label (`int`, *optional*, defaults to -100):
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
+ CrossEntropyLoss.
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
+ Whether or not to only label the first subword, in case word labels are provided.
+ sp_model_kwargs (`dict`, *optional*):
+ Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for
+ SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things,
+ to set:
+
+ - `enable_sampling`: Enable subword regularization.
+ - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout.
+
+ - `nbest_size = {0,1}`: No sampling is performed.
+ - `nbest_size > 1`: samples from the nbest_size results.
+ - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice)
+ using forward-filtering-and-backward-sampling algorithm.
+
+ - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for
+ BPE-dropout.
+
+ Attributes:
+ sp_model (`SentencePieceProcessor`):
+ The *SentencePiece* processor that is used for every conversion (string, tokens and IDs).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="<s>",
+ eos_token="</s>",
+ sep_token="</s>",
+ cls_token="<s>",
+ unk_token="<unk>",
+ pad_token="<pad>",
+ mask_token="<mask>",
+ cls_token_box=[0, 0, 0, 0],
+ sep_token_box=[1000, 1000, 1000, 1000],
+ pad_token_box=[0, 0, 0, 0],
+ pad_token_label=-100,
+ only_label_first_subword=True,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ **kwargs,
+ ) -> None:
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
+
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
+
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
+ self.fairseq_offset = 1
+
+ self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+
+ # additional properties
+ self.cls_token_box = cls_token_box
+ self.sep_token_box = sep_token_box
+ self.pad_token_box = pad_token_box
+ self.pad_token_label = pad_token_label
+ self.only_label_first_subword = only_label_first_subword
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ cls_token_box=cls_token_box,
+ sep_token_box=sep_token_box,
+ pad_token_box=pad_token_box,
+ pad_token_label=pad_token_label,
+ only_label_first_subword=only_label_first_subword,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences with word-level normalized bounding boxes and optional labels.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
+ words).
+ text_pair (`List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
+ (pretokenized string).
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
+ """
+
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if text_pair is not None:
+ # in case text + text_pair are provided, text = questions, text_pair = words
+ if not _is_valid_text_input(text):
+ raise ValueError("text input must be of type `str` (single example) or `List[str]` (batch of examples).")
+ if not isinstance(text_pair, (list, tuple)):
+ raise ValueError(
+ "words must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+ else:
+ # in case only text is provided => must be words
+ if not isinstance(text, (list, tuple)):
+ raise ValueError(
+ "Words must be of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None:
+ is_batched = isinstance(text, (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+
+ words = text if text_pair is None else text_pair
+ if boxes is None:
+ raise ValueError("You must provide corresponding bounding boxes")
+ if is_batched:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
+ for words_example, boxes_example in zip(words, boxes):
+ if len(words_example) != len(boxes_example):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+ else:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+
+ if is_batched:
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ is_pair = bool(text_pair is not None)
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ boxes: Optional[List[List[List[int]]]] = None,
+ word_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ batch_outputs = self._batch_prepare_for_model(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=return_tensors,
+ verbose=verbose,
+ )
+
+ return BatchEncoding(batch_outputs)
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def _batch_prepare_for_model(
+ self,
+ batch_text_or_text_pairs,
+ is_pair: bool = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence of input ids, or a pair of sequences of input ids, so that it can be used by the model. It
+ adds special tokens, truncates sequences if overflowing while taking into account the special tokens and
+ manages a moving window (with a user-defined stride) for overflowing tokens.
+
+ Args:
+ batch_ids_pairs: list of tokenized input ids or input ids pairs
+ """
+
+ batch_outputs = {}
+ for idx, example in enumerate(zip(batch_text_or_text_pairs, boxes)):
+ batch_text_or_text_pair, boxes_example = example
+ outputs = self.prepare_for_model(
+ batch_text_or_text_pair[0] if is_pair else batch_text_or_text_pair,
+ batch_text_or_text_pair[1] if is_pair else None,
+ boxes_example,
+ word_labels=word_labels[idx] if word_labels is not None else None,
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterward
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=None, # we pad in batch afterward
+ return_attention_mask=False, # we pad in batch afterward
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding_strategy.value,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ return batch_outputs
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast. "
+ "More information on available tokenizers at "
+ "https://github.com/huggingface/transformers/pull/2674"
+ )
+
+ return self.prepare_for_model(
+ text=text,
+ text_pair=text_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding_strategy.value,
+ truncation=truncation_strategy.value,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def prepare_for_model(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ prepend_batch_axis: bool = False,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence or a pair of sequences so that it can be used by the model. It adds special tokens,
+ truncates sequences if overflowing while taking into account the special tokens and manages a moving window
+ (with user defined stride) for overflowing tokens.
+
+ Word-level `boxes` are turned into token-level `bbox`. If provided, word-level `word_labels` are turned into
+ token-level `labels`. The word label is used for the first token of the word, while remaining tokens are
+ labeled with -100, such that they will be ignored by the loss function.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The first sequence to be encoded. This can be a string, a list of strings or a list of list of strings.
+ text_pair (`List[str]` or `List[int]`, *optional*):
+ Optional second sequence to be encoded. This can be a list of strings (words of a single example) or a
+ list of list of strings (words of a batch of examples).
+ """
+
+ # Backward compatibility for 'truncation_strategy', 'pad_to_max_length'
+ padding_strategy, truncation_strategy, max_length, kwargs = self._get_padding_truncation_strategies(
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ tokens = []
+ pair_tokens = []
+ token_boxes = []
+ pair_token_boxes = []
+ labels = []
+
+ if text_pair is None:
+ if word_labels is None:
+ # CASE 1: document image classification (training + inference) + CASE 2: token classification (inference)
+ for word, box in zip(text, boxes):
+ if len(word) < 1: # skip empty words
+ continue
+ word_tokens = self.tokenize(word)
+ tokens.extend(word_tokens)
+ token_boxes.extend([box] * len(word_tokens))
+ else:
+ # CASE 2: token classification (training)
+ for word, box, label in zip(text, boxes, word_labels):
+ if len(word) < 1: # skip empty words
+ continue
+ word_tokens = self.tokenize(word)
+ tokens.extend(word_tokens)
+ token_boxes.extend([box] * len(word_tokens))
+ if self.only_label_first_subword:
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
+ labels.extend([label] + [self.pad_token_label] * (len(word_tokens) - 1))
+ else:
+ labels.extend([label] * len(word_tokens))
+ else:
+ # CASE 3: document visual question answering (inference)
+ # text = question
+ # text_pair = words
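+ # the question has no layout, so its tokens get the padding box; the appended sep_token_box
+ # covers one of the </s> separators later inserted between question and words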
+ tokens = self.tokenize(text)
+ token_boxes = [self.pad_token_box for _ in range(len(tokens))] + [self.sep_token_box]
+
+ for word, box in zip(text_pair, boxes):
+ if len(word) < 1: # skip empty words
+ continue
+ word_tokens = self.tokenize(word)
+ pair_tokens.extend(word_tokens)
+ pair_token_boxes.extend([box] * len(word_tokens))
+
+ # Create ids + pair_ids
+ ids = self.convert_tokens_to_ids(tokens)
+ pair_ids = self.convert_tokens_to_ids(pair_tokens) if pair_tokens else None
+
+ # Compute the total size of the returned encodings
+ pair = bool(pair_ids is not None)
+ len_ids = len(ids)
+ len_pair_ids = len(pair_ids) if pair else 0
+ total_len = len_ids + len_pair_ids + (self.num_special_tokens_to_add(pair=pair) if add_special_tokens else 0)
+
+ # Truncation: Handle max sequence length
+ overflowing_tokens = []
+ overflowing_token_boxes = []
+ overflowing_labels = []
+ if truncation_strategy != TruncationStrategy.DO_NOT_TRUNCATE and max_length and total_len > max_length:
+ (
+ ids,
+ token_boxes,
+ pair_ids,
+ pair_token_boxes,
+ labels,
+ overflowing_tokens,
+ overflowing_token_boxes,
+ overflowing_labels,
+ ) = self.truncate_sequences(
+ ids,
+ token_boxes,
+ pair_ids=pair_ids,
+ pair_token_boxes=pair_token_boxes,
+ labels=labels,
+ num_tokens_to_remove=total_len - max_length,
+ truncation_strategy=truncation_strategy,
+ stride=stride,
+ )
+
+ if return_token_type_ids and not add_special_tokens:
+ raise ValueError(
+ "Asking to return token_type_ids while setting add_special_tokens to False "
+ "results in an undefined behavior. Please set add_special_tokens to True or "
+ "set return_token_type_ids to None."
+ )
+
+ # Load from model defaults
+ if return_token_type_ids is None:
+ return_token_type_ids = "token_type_ids" in self.model_input_names
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ encoded_inputs = {}
+
+ if return_overflowing_tokens:
+ encoded_inputs["overflowing_tokens"] = overflowing_tokens
+ encoded_inputs["overflowing_token_boxes"] = overflowing_token_boxes
+ encoded_inputs["overflowing_labels"] = overflowing_labels
+ encoded_inputs["num_truncated_tokens"] = total_len - max_length
+
+ # Add special tokens
+ if add_special_tokens:
+ sequence = self.build_inputs_with_special_tokens(ids, pair_ids)
+ token_type_ids = self.create_token_type_ids_from_sequences(ids, pair_ids)
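+ # keep bbox and labels aligned with the special tokens just added: <s> gets cls_token_box,
+ # </s> gets sep_token_box, and special-token positions get pad_token_label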
+ token_boxes = [self.cls_token_box] + token_boxes + [self.sep_token_box]
+ if pair_token_boxes:
+ pair_token_boxes = pair_token_boxes + [self.sep_token_box]
+ if labels:
+ labels = [self.pad_token_label] + labels + [self.pad_token_label]
+ else:
+ sequence = ids + pair_ids if pair else ids
+ token_type_ids = [0] * len(ids) + ([0] * len(pair_ids) if pair else [])
+
+ # Build output dictionary
+ encoded_inputs["input_ids"] = sequence
+ encoded_inputs["bbox"] = token_boxes + pair_token_boxes
+ if return_token_type_ids:
+ encoded_inputs["token_type_ids"] = token_type_ids
+ if return_special_tokens_mask:
+ if add_special_tokens:
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(ids, pair_ids)
+ else:
+ encoded_inputs["special_tokens_mask"] = [0] * len(sequence)
+
+ if labels:
+ encoded_inputs["labels"] = labels
+
+ # Check lengths
+ self._eventual_warn_about_too_long_sequence(encoded_inputs["input_ids"], max_length, verbose)
+
+ # Padding
+ if padding_strategy != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
+ encoded_inputs = self.pad(
+ encoded_inputs,
+ max_length=max_length,
+ padding=padding_strategy.value,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ if return_length:
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+
+ batch_outputs = BatchEncoding(
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
+ )
+
+ return batch_outputs
+
+ def truncate_sequences(
+ self,
+ ids: List[int],
+ token_boxes: List[List[int]],
+ pair_ids: Optional[List[int]] = None,
+ pair_token_boxes: Optional[List[List[int]]] = None,
+ labels: Optional[List[int]] = None,
+ num_tokens_to_remove: int = 0,
+ truncation_strategy: Union[str, TruncationStrategy] = "longest_first",
+ stride: int = 0,
+ ) -> Tuple[List[int], List[int], List[int]]:
+ """
+ Truncates a sequence pair in-place following the strategy.
+
+ Args:
+ ids (`List[int]`):
+ Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and
+ `convert_tokens_to_ids` methods.
+ token_boxes (`List[List[int]]`):
+ Bounding boxes of the first sequence.
+ pair_ids (`List[int]`, *optional*):
+ Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize`
+ and `convert_tokens_to_ids` methods.
+ pair_token_boxes (`List[List[int]]`, *optional*):
+ Bounding boxes of the second sequence.
+ labels (`List[int]`, *optional*):
+ Labels of the first sequence (for token classification tasks).
+ num_tokens_to_remove (`int`, *optional*, defaults to 0):
+ Number of tokens to remove using the truncation strategy.
+ truncation_strategy (`str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `'longest_first'`):
+ The strategy to follow for truncation. Can be:
+
+ - `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will truncate
+ token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
+ batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'do_not_truncate'`: No truncation (i.e., can output batch with sequence lengths greater
+ than the model maximum admissible input size).
+ stride (`int`, *optional*, defaults to 0):
+ If set to a positive number, the overflowing tokens returned will contain some tokens from the main
+ sequence returned. The value of this argument defines the number of additional tokens.
+
+ Returns:
+ `Tuple[List[int], List[List[int]], List[int], List[List[int]], List[int], List[int], List[List[int]], List[int]]`:
+ The truncated `ids`, `token_boxes`, `pair_ids`, `pair_token_boxes` and `labels`, followed by the
+ overflowing tokens, overflowing token boxes and overflowing labels.
+ """
+ if num_tokens_to_remove <= 0:
+ return ids, token_boxes, pair_ids, pair_token_boxes, labels, [], [], []
+
+ if not isinstance(truncation_strategy, TruncationStrategy):
+ truncation_strategy = TruncationStrategy(truncation_strategy)
+
+ overflowing_tokens = []
+ overflowing_token_boxes = []
+ overflowing_labels = []
+ if truncation_strategy == TruncationStrategy.LONGEST_FIRST:
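+ # remove tokens one by one from whichever sequence is currently longer; on the first removal,
+ # keep a window of `stride + 1` tokens as overflow so truncated and overflowing parts overlap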
+ for _ in range(num_tokens_to_remove):
+ if pair_ids is None or len(ids) > len(pair_ids):
+ if not overflowing_tokens:
+ window_len = min(len(ids), stride + 1)
+ else:
+ window_len = 1
+ overflowing_tokens.extend(ids[-window_len:])
+ overflowing_token_boxes.extend(token_boxes[-window_len:])
+ overflowing_labels.extend(labels[-window_len:])
+ ids = ids[:-1]
+ token_boxes = token_boxes[:-1]
+ labels = labels[:-1]
+ else:
+ if not overflowing_tokens:
+ window_len = min(len(pair_ids), stride + 1)
+ else:
+ window_len = 1
+ overflowing_tokens.extend(pair_ids[-window_len:])
+ overflowing_token_boxes.extend(pair_token_boxes[-window_len:])
+ pair_ids = pair_ids[:-1]
+ pair_token_boxes = pair_token_boxes[:-1]
+ elif truncation_strategy == TruncationStrategy.ONLY_FIRST:
+ if len(ids) > num_tokens_to_remove:
+ window_len = min(len(ids), stride + num_tokens_to_remove)
+ overflowing_tokens = ids[-window_len:]
+ overflowing_token_boxes = token_boxes[-window_len:]
+ overflowing_labels = labels[-window_len:]
+ ids = ids[:-num_tokens_to_remove]
+ token_boxes = token_boxes[:-num_tokens_to_remove]
+ labels = labels[:-num_tokens_to_remove]
+ else:
+ logger.error(
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the first sequence has a length {len(ids)}. "
+ f"Please select another truncation strategy than {truncation_strategy}, "
+ "for instance 'longest_first' or 'only_second'."
+ )
+ elif truncation_strategy == TruncationStrategy.ONLY_SECOND and pair_ids is not None:
+ if len(pair_ids) > num_tokens_to_remove:
+ window_len = min(len(pair_ids), stride + num_tokens_to_remove)
+ overflowing_tokens = pair_ids[-window_len:]
+ overflowing_token_boxes = pair_token_boxes[-window_len:]
+ pair_ids = pair_ids[:-num_tokens_to_remove]
+ pair_token_boxes = pair_token_boxes[:-num_tokens_to_remove]
+ else:
+ logger.error(
+ f"We need to remove {num_tokens_to_remove} to truncate the input "
+ f"but the second sequence has a length {len(pair_ids)}. "
+ f"Please select another truncation strategy than {truncation_strategy}, "
+ "for instance 'longest_first' or 'only_first'."
+ )
+
+ return (
+ ids,
+ token_boxes,
+ pair_ids,
+ pair_token_boxes,
+ labels,
+ overflowing_tokens,
+ overflowing_token_boxes,
+ overflowing_labels,
+ )
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
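+ # round max_length up to the next multiple of pad_to_multiple_of (e.g. 8 for Tensor Cores)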
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
diff --git a/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..e899d8b22e4df6ce2e3894cdcfb92987f43ec866
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py
@@ -0,0 +1,800 @@
+# coding=utf-8
+# Copyright 2021 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+""" Tokenization classes for LayoutXLM model."""
+
+
+import os
+from shutil import copyfile
+from typing import Dict, List, Optional, Tuple, Union
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_base import (
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+ TextInputPair,
+ TruncationStrategy,
+)
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging
+from ..xlm_roberta.tokenization_xlm_roberta_fast import (
+ VOCAB_FILES_NAMES,
+)
+
+
+if is_sentencepiece_available():
+ from .tokenization_layoutxlm import LayoutXLMTokenizer
+else:
+ LayoutXLMTokenizer = None
+
+
+logger = logging.get_logger(__name__)
+
+LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or
+ to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate token by token, removing a token from the longest sequence in the pair if a pair of
+ sequences (or a batch of pairs) is provided.
+ - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the
+ maximum acceptable input length for the model if that argument is not provided. This will only
+ truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters.
+
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
+ stride (`int`, *optional*, defaults to 0):
+ If set to a number along with `max_length`, the overflowing tokens returned when
+ `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence
+ returned to provide some overlap between truncated and overflowing sequences. The value of this
+ argument defines the number of overlapping tokens.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ return_token_type_ids (`bool`, *optional*):
+ Whether to return token type IDs. If left to the default, will return the token type IDs according to
+ the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific tokenizer's default, defined by the `return_outputs` attribute.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_overflowing_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch
+ of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead
+ of returning overflowing tokens.
+ return_special_tokens_mask (`bool`, *optional*, defaults to `False`):
+ Whether or not to return special tokens mask information.
+ return_offsets_mapping (`bool`, *optional*, defaults to `False`):
+ Whether or not to return `(char_start, char_end)` for each token.
+
+ This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using
+ Python's tokenizer, this method will raise `NotImplementedError`.
+ return_length (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the lengths of the encoded inputs.
+ verbose (`bool`, *optional*, defaults to `True`):
+ Whether or not to print more information and warnings.
+ **kwargs: passed to the `self.tokenize()` method
+
+ Return:
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
+
+ - **input_ids** -- List of token ids to be fed to a model.
+
+ [What are input IDs?](../glossary#input-ids)
+
+ - **bbox** -- List of bounding boxes to be fed to a model.
+
+ - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or
+ if *"token_type_ids"* is in `self.model_input_names`).
+
+ [What are token type IDs?](../glossary#token-type-ids)
+
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`).
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified).
+ - **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and
+ `return_overflowing_tokens=True`).
+ - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying
+ regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`).
+ - **length** -- The length of the inputs (when `return_length=True`).
+"""
+
+
+class LayoutXLMTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from
+ [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [CLS] token.
+ sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`):
+ The bounding box to use for the special [SEP] token.
+ pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`):
+ The bounding box to use for the special [PAD] token.
+ pad_token_label (`int`, *optional*, defaults to -100):
+ The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's
+ CrossEntropyLoss.
+ only_label_first_subword (`bool`, *optional*, defaults to `True`):
+ Whether or not to only label the first subword, in case word labels are provided.
+ additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`):
+ Additional special tokens used by the tokenizer.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = LayoutXLMTokenizer
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ cls_token_box=[0, 0, 0, 0],
+ sep_token_box=[1000, 1000, 1000, 1000],
+ pad_token_box=[0, 0, 0, 0],
+ pad_token_label=-100,
+ only_label_first_subword=True,
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ cls_token_box=cls_token_box,
+ sep_token_box=sep_token_box,
+ pad_token_box=pad_token_box,
+ pad_token_label=pad_token_label,
+ only_label_first_subword=only_label_first_subword,
+ **kwargs,
+ )
+
+ self.vocab_file = vocab_file
+
+ # additional properties
+ self.cls_token_box = cls_token_box
+ self.sep_token_box = sep_token_box
+ self.pad_token_box = pad_token_box
+ self.pad_token_label = pad_token_label
+ self.only_label_first_subword = only_label_first_subword
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]],
+ text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
+ boxes: Union[List[List[int]], List[List[List[int]]]] = None,
+ word_labels: Optional[Union[List[int], List[List[int]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of
+ sequences with word-level normalized bounding boxes and optional labels.
+
+ Args:
+ text (`str`, `List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings
+ (words of a single example or questions of a batch of examples) or a list of list of strings (batch of
+ words).
+ text_pair (`List[str]`, `List[List[str]]`):
+ The sequence or batch of sequences to be encoded. Each sequence should be a list of strings
+ (pretokenized string).
+ boxes (`List[List[int]]`, `List[List[List[int]]]`):
+ Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale.
+ word_labels (`List[int]`, `List[List[int]]`, *optional*):
+ Word-level integer labels (for token classification tasks such as FUNSD, CORD).
+ """
+
+ # Input type checking for clearer error
+ def _is_valid_text_input(t):
+ if isinstance(t, str):
+ # Strings are fine
+ return True
+ elif isinstance(t, (list, tuple)):
+ # List are fine as long as they are...
+ if len(t) == 0:
+ # ... empty
+ return True
+ elif isinstance(t[0], str):
+ # ... list of strings
+ return True
+ elif isinstance(t[0], (list, tuple)):
+ # ... list with an empty list or with a list of strings
+ return len(t[0]) == 0 or isinstance(t[0][0], str)
+ else:
+ return False
+ else:
+ return False
+
+ if text_pair is not None:
+ # in case text + text_pair are provided, text = questions, text_pair = words
+ if not _is_valid_text_input(text):
+ raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ")
+ if not isinstance(text_pair, (list, tuple)):
+ raise ValueError(
+ "words must of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+ else:
+ # in case only text is provided => must be words
+ if not isinstance(text, (list, tuple)):
+ raise ValueError(
+ "Words must of type `List[str]` (single pretokenized example), "
+ "or `List[List[str]]` (batch of pretokenized examples)."
+ )
+
+ if text_pair is not None:
+ is_batched = isinstance(text, (list, tuple))
+ else:
+ is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple))
+
+ words = text if text_pair is None else text_pair
+ if boxes is None:
+ raise ValueError("You must provide corresponding bounding boxes")
+ if is_batched:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide words and boxes for an equal amount of examples")
+ for words_example, boxes_example in zip(words, boxes):
+ if len(words_example) != len(boxes_example):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+ else:
+ if len(words) != len(boxes):
+ raise ValueError("You must provide as many words as there are bounding boxes")
+
+ if is_batched:
+ if text_pair is not None and len(text) != len(text_pair):
+ raise ValueError(
+ f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:"
+ f" {len(text_pair)}."
+ )
+ batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text
+ is_pair = bool(text_pair is not None)
+ return self.batch_encode_plus(
+ batch_text_or_text_pairs=batch_text_or_text_pairs,
+ is_pair=is_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ text=text,
+ text_pair=text_pair,
+ boxes=boxes,
+ word_labels=word_labels,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]:
+ batched_input = [(text, pair)] if pair else [text]
+ encodings = self._tokenizer.encode_batch(
+ batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs
+ )
+
+ return encodings[0].tokens
+
+ def _batch_encode_plus(
+ self,
+ batch_text_or_text_pairs: Union[
+ List[TextInput],
+ List[TextInputPair],
+ List[PreTokenizedInput],
+ ],
+ is_pair: bool = None,
+ boxes: Optional[List[List[List[int]]]] = None,
+ word_labels: Optional[List[List[int]]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[str] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ if not isinstance(batch_text_or_text_pairs, list):
+ raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})")
+
+ # Set the truncation and padding strategy and restore the initial configuration
+ self.set_truncation_and_padding(
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ )
+
+ if is_pair:
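+ # the question is a plain string: split it into words so the backend tokenizer can treat
+ # both sequences of the pair as pretokenized input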
+ batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs]
+
+ encodings = self._tokenizer.encode_batch(
+ batch_text_or_text_pairs,
+ add_special_tokens=add_special_tokens,
+ is_pretokenized=True, # we set this to True as LayoutXLM always expects pretokenized inputs
+ )
+
+ # Convert encoding to dict
+ # `Tokens` has type: Tuple[
+ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]],
+ # List[EncodingFast]
+ # ]
+ # with nested dimensions corresponding to batch, overflows, sequence length
+ tokens_and_encodings = [
+ self._convert_encoding(
+ encoding=encoding,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=True
+ if word_labels is not None
+ else return_offsets_mapping, # we use offsets to create the labels
+ return_length=return_length,
+ verbose=verbose,
+ )
+ for encoding in encodings
+ ]
+
+ # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension
+ # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length)
+ # (we say ~ because the number of overflow varies with the example in the batch)
+ #
+ # To match each overflowing sample with the original sample in the batch
+ # we add an overflow_to_sample_mapping array (see below)
+ sanitized_tokens = {}
+ for key in tokens_and_encodings[0][0].keys():
+ stack = [e for item, _ in tokens_and_encodings for e in item[key]]
+ sanitized_tokens[key] = stack
+ sanitized_encodings = [e for _, item in tokens_and_encodings for e in item]
+
+ # If returning overflowing tokens, we need to return a mapping
+ # from the batch idx to the original sample
+ if return_overflowing_tokens:
+ overflow_to_sample_mapping = []
+ for i, (toks, _) in enumerate(tokens_and_encodings):
+ overflow_to_sample_mapping += [i] * len(toks["input_ids"])
+ sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping
+
+ for input_ids in sanitized_tokens["input_ids"]:
+ self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose)
+
+ # create the token boxes
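+ # each token inherits the box of the word it was split from; special tokens get their dedicated
+ # boxes and, for question/words pairs, question tokens (sequence_id 0) get the padding box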
+ token_boxes = []
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
+ if return_overflowing_tokens:
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
+ else:
+ original_index = batch_index
+ token_boxes_example = []
+ for id, sequence_id, word_id in zip(
+ sanitized_tokens["input_ids"][batch_index],
+ sanitized_encodings[batch_index].sequence_ids,
+ sanitized_encodings[batch_index].word_ids,
+ ):
+ if word_id is not None:
+ if is_pair and sequence_id == 0:
+ token_boxes_example.append(self.pad_token_box)
+ else:
+ token_boxes_example.append(boxes[original_index][word_id])
+ else:
+ if id == self.cls_token_id:
+ token_boxes_example.append(self.cls_token_box)
+ elif id == self.sep_token_id:
+ token_boxes_example.append(self.sep_token_box)
+ elif id == self.pad_token_id:
+ token_boxes_example.append(self.pad_token_box)
+ else:
+ raise ValueError("Id not recognized")
+ token_boxes.append(token_boxes_example)
+
+ sanitized_tokens["bbox"] = token_boxes
+
+ # optionally, create the labels
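+ # labels follow the same word-to-token mapping; with only_label_first_subword, only the subword
+ # starting at character offset 0 of its word keeps the label, the rest get pad_token_label (-100)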
+ if word_labels is not None:
+ labels = []
+ for batch_index in range(len(sanitized_tokens["input_ids"])):
+ if return_overflowing_tokens:
+ original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index]
+ else:
+ original_index = batch_index
+ labels_example = []
+ for id, offset, word_id in zip(
+ sanitized_tokens["input_ids"][batch_index],
+ sanitized_tokens["offset_mapping"][batch_index],
+ sanitized_encodings[batch_index].word_ids,
+ ):
+ if word_id is not None:
+ if self.only_label_first_subword:
+ if offset[0] == 0:
+ # Use the real label id for the first token of the word, and padding ids for the remaining tokens
+ labels_example.append(word_labels[original_index][word_id])
+ else:
+ labels_example.append(self.pad_token_label)
+ else:
+ labels_example.append(word_labels[original_index][word_id])
+ else:
+ labels_example.append(self.pad_token_label)
+ labels.append(labels_example)
+
+ sanitized_tokens["labels"] = labels
+ # finally, remove offsets if the user didn't want them
+ if not return_offsets_mapping:
+ del sanitized_tokens["offset_mapping"]
+
+ return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors)
+
+ def _encode_plus(
+ self,
+ text: Union[TextInput, PreTokenizedInput],
+ text_pair: Optional[PreTokenizedInput] = None,
+ boxes: Optional[List[List[int]]] = None,
+ word_labels: Optional[List[int]] = None,
+ add_special_tokens: bool = True,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE,
+ max_length: Optional[int] = None,
+ stride: int = 0,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[bool] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ # make it a batched input
+ # 2 options:
+ # 1) only text, in which case text must be a list of str
+ # 2) text + text_pair, in which case text = str and text_pair a list of str
+ batched_input = [(text, text_pair)] if text_pair else [text]
+ batched_boxes = [boxes]
+ batched_word_labels = [word_labels] if word_labels is not None else None
+ batched_output = self._batch_encode_plus(
+ batched_input,
+ is_pair=bool(text_pair is not None),
+ boxes=batched_boxes,
+ word_labels=batched_word_labels,
+ add_special_tokens=add_special_tokens,
+ padding_strategy=padding_strategy,
+ truncation_strategy=truncation_strategy,
+ max_length=max_length,
+ stride=stride,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ # If return_tensors is None, we can remove the leading batch axis
+ # Overflowing tokens are returned as a batch of output so we keep them in this case
+ if return_tensors is None and not return_overflowing_tokens:
+ batched_output = BatchEncoding(
+ {
+ key: value[0] if len(value) > 0 and isinstance(value[0], list) else value
+ for key, value in batched_output.items()
+ },
+ batched_output.encodings,
+ )
+
+ self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose)
+
+ return batched_output
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length (default)
+ - PaddingStrategy.DO_NOT_PAD: Do not pad
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer; if set, will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ required_input = encoded_inputs[self.model_input_names[0]]
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(required_input)
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(required_input)
+
+ if needs_to_be_padded:
+ difference = max_length - len(required_input)
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference
+ )
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "bbox" in encoded_inputs:
+ encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. An XLM-RoBERTa sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does
+ not make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bae4593c87d89c1e1d078e884e92db2e3d8dc2b0
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/__init__.py
@@ -0,0 +1,148 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_sentencepiece_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
+
+try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mbart"] = [
+ "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MBartForCausalLM",
+ "MBartForConditionalGeneration",
+ "MBartForQuestionAnswering",
+ "MBartForSequenceClassification",
+ "MBartModel",
+ "MBartPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_mbart"] = [
+ "TFMBartForConditionalGeneration",
+ "TFMBartModel",
+ "TFMBartPreTrainedModel",
+ ]
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_mbart"] = [
+ "FlaxMBartForConditionalGeneration",
+ "FlaxMBartForQuestionAnswering",
+ "FlaxMBartForSequenceClassification",
+ "FlaxMBartModel",
+ "FlaxMBartPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
+
+ try:
+ if not is_sentencepiece_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_mbart import MBartTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_mbart_fast import MBartTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mbart import (
+ MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MBartForCausalLM,
+ MBartForConditionalGeneration,
+ MBartForQuestionAnswering,
+ MBartForSequenceClassification,
+ MBartModel,
+ MBartPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_mbart import (
+ FlaxMBartForConditionalGeneration,
+ FlaxMBartForQuestionAnswering,
+ FlaxMBartForSequenceClassification,
+ FlaxMBartModel,
+ FlaxMBartPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
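+# A brief sketch of how the lazy structure above behaves for a consumer of the
+# package (nothing below is part of this module): submodules are imported only
+# when one of their names is first accessed, and backend-specific names exist
+# only when the corresponding backend is installed.
+#
+#   >>> from transformers.models import mbart
+#   >>> mbart.MBartConfig               # first access imports configuration_mbart
+#   >>> hasattr(mbart, "MBartModel")    # True only if torch is available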
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..aeebd280ba1fd57c65af38c050406d304f8cee35
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e4c11399675fd9323979bd63049d19778ee1050
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/configuration_mbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e53d1b103e567b625f1c24b941c3626a602208af
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/convert_mbart_original_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1080e4ea9baeb83dd313e3393456d2f1647ba755
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_flax_mbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c62993b41a53a6180f4f19984b23e9b236b4a51d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_mbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2ecc3c032a044d27c1202de355416596d178a818
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/modeling_tf_mbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5f7030fc34c00886ad7035a9d5d1af929fce6be5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2f4d409366caa1caf60b6a88b3c0184c1734ee6e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mbart/__pycache__/tokenization_mbart_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/configuration_mbart.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/configuration_mbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..4823047dcf31517a889fe279541c73450fc7e6d6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/configuration_mbart.py
@@ -0,0 +1,386 @@
+# coding=utf-8
+# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MBART model configuration"""
+from collections import OrderedDict
+from typing import Any, Mapping, Optional
+
+from ... import PreTrainedTokenizer
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
+from ...onnx.utils import compute_effective_axis_dimension
+from ...utils import TensorType, is_torch_available, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class MBartConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the MBART
+ [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 50265):
+ Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`].
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ classifier_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the classifier.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+ Scale embeddings by multiplying them by sqrt(d_model).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/value attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Example:
+
+ ```python
+ >>> from transformers import MBartConfig, MBartModel
+
+ >>> # Initializing a MBART facebook/mbart-large-cc25 style configuration
+ >>> configuration = MBartConfig()
+
+ >>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration
+ >>> model = MBartModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "mbart"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=50265,
+ max_position_embeddings=1024,
+ encoder_layers=12,
+ encoder_ffn_dim=4096,
+ encoder_attention_heads=16,
+ decoder_layers=12,
+ decoder_ffn_dim=4096,
+ decoder_attention_heads=16,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="gelu",
+ d_model=1024,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ classifier_dropout=0.0,
+ scale_embedding=False,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ forced_eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.classifier_dropout = classifier_dropout
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ forced_eos_token_id=forced_eos_token_id,
+ **kwargs,
+ )
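+ # A short sketch of the aliasing declared in `attribute_map` above; the values
+ # shown are the defaults from this `__init__` and are illustrative only:
+ #
+ #   >>> config = MBartConfig(encoder_layers=6, decoder_layers=6)
+ #   >>> config.hidden_size           # alias for config.d_model -> 1024
+ #   >>> config.num_attention_heads   # alias for config.encoder_attention_heads -> 16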
+
+
+# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig with Bart->MBart
+class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+
+ if self.use_past:
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+ elif self.task == "causal-lm":
+ # TODO: figure this case out.
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+ if self.use_past:
+ num_encoder_layers, _ = self.num_layers
+ for i in range(num_encoder_layers):
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ else:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
+ ]
+ )
+
+ return common_inputs
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_outputs = super().outputs
+ else:
+ common_outputs = super(OnnxConfigWithPast, self).outputs
+ if self.use_past:
+ num_encoder_layers, _ = self.num_layers
+ for i in range(num_encoder_layers):
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ return common_outputs
+
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+
+ # Generate decoder inputs
+ decoder_seq_length = seq_length if not self.use_past else 1
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
+ )
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
+ encoder_shape = (
+ batch,
+ num_encoder_attention_heads,
+ encoder_seq_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+ decoder_past_length = decoder_seq_length + 3
+ decoder_shape = (
+ batch,
+ num_decoder_attention_heads,
+ decoder_past_length,
+ self._config.hidden_size // num_decoder_attention_heads,
+ )
+
+ common_inputs["decoder_attention_mask"] = torch.cat(
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
+ )
+
+ common_inputs["past_key_values"] = []
+ # If the number of encoder and decoder layers are present in the model configuration, both are considered
+ num_encoder_layers, num_decoder_layers = self.num_layers
+ min_num_layers = min(num_encoder_layers, num_decoder_layers)
+ max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
+ remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
+
+ for _ in range(min_num_layers):
+ common_inputs["past_key_values"].append(
+ (
+ torch.zeros(decoder_shape),
+ torch.zeros(decoder_shape),
+ torch.zeros(encoder_shape),
+ torch.zeros(encoder_shape),
+ )
+ )
+ # TODO: test this.
+ shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
+ for _ in range(min_num_layers, max_num_layers):
+ common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
+ return common_inputs
+
+ def _generate_dummy_inputs_for_causal_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, seqlen = common_inputs["input_ids"].shape
+ # Not using the same length for past_key_values
+ past_key_values_length = seqlen + 2
+ num_encoder_layers, _ = self.num_layers
+ num_encoder_attention_heads, _ = self.num_attention_heads
+ past_shape = (
+ batch,
+ num_encoder_attention_heads,
+ past_key_values_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+
+ mask_dtype = common_inputs["attention_mask"].dtype
+ common_inputs["attention_mask"] = torch.cat(
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+ common_inputs["past_key_values"] = [
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
+ ]
+ return common_inputs
+
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ # Copied from OnnxConfig.generate_dummy_inputs
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+ batch_size = compute_effective_axis_dimension(
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
+ )
+
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
+ seq_length = compute_effective_axis_dimension(
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
+ )
+
+ # Generate dummy inputs according to compute batch and sequence
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
+ return common_inputs
+
+ def generate_dummy_inputs(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ elif self.task == "causal-lm":
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+ else:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ return common_inputs
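+ # Sketch of driving the dispatch above when exporting to ONNX, assuming a
+ # PyTorch install and network access for the tokenizer; the checkpoint name is
+ # the usual mBART one and is used here purely for illustration:
+ #
+ #   >>> from transformers import AutoTokenizer, MBartConfig, TensorType
+ #   >>> onnx_config = MBartOnnxConfig(MBartConfig(), task="default")
+ #   >>> tok = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+ #   >>> dummy = onnx_config.generate_dummy_inputs(tok, framework=TensorType.PYTORCH)
+ #   >>> sorted(dummy)   # ['attention_mask', 'decoder_attention_mask', 'decoder_input_ids', 'input_ids']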
+
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
+ if self.task in ["default", "seq2seq-lm"]:
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
+ else:
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
+ flattened_output, name, idx, t
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb7f00bf77107ff858a6131305f2e8bf6a17654b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/convert_mbart_original_checkpoint_to_pytorch.py
@@ -0,0 +1,83 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+from torch import nn
+
+from transformers import MBartConfig, MBartForConditionalGeneration
+
+
+def remove_ignore_keys_(state_dict):
+ ignore_keys = [
+ "encoder.version",
+ "decoder.version",
+ "model.encoder.version",
+ "model.decoder.version",
+ "_float_tensor",
+ "decoder.output_projection.weight",
+ ]
+ for k in ignore_keys:
+ state_dict.pop(k, None)
+
+
+def make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+def convert_fairseq_mbart_checkpoint_from_disk(
+ checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
+):
+ state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
+ remove_ignore_keys_(state_dict)
+ vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
+
+ mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
+ if mbart_50 and finetuned:
+ mbart_config.activation_function = "relu"
+
+ state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
+ model = MBartForConditionalGeneration(mbart_config)
+ model.model.load_state_dict(state_dict)
+
+ if finetuned:
+ model.lm_head = make_linear_from_emb(model.model.shared)
+
+ return model
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
+ )
+ parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
+ parser.add_argument(
+ "--hf_config",
+ default="facebook/mbart-large-cc25",
+ type=str,
+ help="Which huggingface architecture to use: mbart-large",
+ )
+ parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
+ parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
+ args = parser.parse_args()
+ model = convert_fairseq_mbart_checkpoint_from_disk(
+ args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
+ )
+ model.save_pretrained(args.pytorch_dump_folder_path)
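+# A minimal usage sketch for the converter above; the paths are placeholders,
+# not real files:
+#
+#   python convert_mbart_original_checkpoint_to_pytorch.py \
+#       /path/to/fairseq/model.pt /path/to/output_dir \
+#       --hf_config facebook/mbart-large-cc25 --finetuned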
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_flax_mbart.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_flax_mbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..907fd53aa1e5d3214d5e5f2feba99060cbbafe7c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_flax_mbart.py
@@ -0,0 +1,1771 @@
+# coding=utf-8
+# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Flax MBart model."""
+
+import math
+import random
+from functools import partial
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxSeq2SeqLMOutput,
+ FlaxSeq2SeqModelOutput,
+ FlaxSeq2SeqQuestionAnsweringModelOutput,
+ FlaxSeq2SeqSequenceClassifierOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_mbart import MBartConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
+_CONFIG_FOR_DOC = "MBartConfig"
+
+
+MBART_START_DOCSTRING = r"""
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`MBartConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
+ `jax.numpy.bfloat16` (on TPUs).
+
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
+ specified all the computation will be performed with the given `dtype`.
+
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
+ parameters.**
+
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
+ [`~FlaxPreTrainedModel.to_bf16`].
+"""
+
+MBART_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+MBART_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+MBART_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+ If you want to change padding behavior, you should modify to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right, and wrap the last non-pad token (the <LID> token). Note that MBart does not
+ have a single `decoder_start_token_id` in contrast to other Bart-like models.
+ """
+ prev_output_tokens = jnp.array(input_ids).copy()
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+
+ # replace possible -100 values in labels by `pad_token_id`
+ prev_output_tokens = jnp.where(prev_output_tokens == -100, pad_token_id, input_ids)
+ index_of_eos = (jnp.where(prev_output_tokens != pad_token_id, 1, 0).sum(axis=-1) - 1).reshape(-1, 1)
+ decoder_start_tokens = jnp.array(
+ [prev_output_tokens[i, eos_idx] for i, eos_idx in enumerate(index_of_eos)], dtype=jnp.int32
+ ).squeeze()
+
+ prev_output_tokens = prev_output_tokens.at[:, 1:].set(prev_output_tokens[:, :-1])
+ prev_output_tokens = prev_output_tokens.at[:, 0].set(decoder_start_tokens)
+
+ return prev_output_tokens
+
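+# A small worked example of the shift above, with `pad_token_id=1` as in the
+# standard mBART config and 250004 standing in for a language-id token (values
+# are illustrative only):
+#
+#   >>> labels = jnp.array([[9, 8, 2, 250004, 1, 1]])   # tokens </s> <lid> <pad> <pad>
+#   >>> shift_tokens_right(labels, pad_token_id=1)      # -> [[250004, 9, 8, 2, 250004, 1]]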
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->MBart
+class FlaxMBartAttention(nn.Module):
+ config: MBartConfig
+ embed_dim: int
+ num_heads: int
+ dropout: float = 0.0
+ causal: bool = False
+ bias: bool = True
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self) -> None:
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ dense = partial(
+ nn.Dense,
+ self.embed_dim,
+ use_bias=self.bias,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+ self.out_proj = dense()
+
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
+
+ if self.causal:
+ self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+ )
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+ states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ key_value_states: Optional[jnp.ndarray] = None,
+ attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size = hidden_states.shape[0]
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self.k_proj(key_value_states)
+ value_states = self.v_proj(key_value_states)
+ else:
+ # self_attention
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # handle cache prepare causal attention mask
+ if self.causal:
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ # combine masks if needed
+ if attention_mask is not None and self.causal:
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+ elif self.causal:
+ attention_mask = causal_mask
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights
+
+
+class FlaxMBartEncoderLayer(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.d_model
+ self.self_attn = FlaxMBartAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.encoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ dtype=self.dtype,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
+ self.fc1 = nn.Dense(
+ self.config.encoder_ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->MBart
+class FlaxMBartEncoderLayerCollection(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxMBartEncoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.encoder_layers)
+ ]
+ self.layerdrop = self.config.encoder_layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for encoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
+ layer_outputs = (None, None)
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ deterministic,
+ )
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states, all_hidden_states, all_attentions)
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+class FlaxMBartDecoderLayer(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.d_model
+ self.self_attn = FlaxMBartAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.decoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ causal=True,
+ dtype=self.dtype,
+ )
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
+
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.encoder_attn = FlaxMBartAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.decoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ dtype=self.dtype,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.fc1 = nn.Dense(
+ self.config.decoder_ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+ hidden_states, cross_attn_weights = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->MBart
+class FlaxMBartDecoderLayerCollection(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxMBartDecoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.decoder_layers)
+ ]
+ self.layerdrop = self.config.decoder_layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if not deterministic and (dropout_probability < self.layerdrop):
+ layer_outputs = (None, None, None)
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartClassificationHead with Bart->MBart
+class FlaxMBartClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ config: MBartConfig
+ inner_dim: int
+ num_classes: int
+ pooler_dropout: float
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self):
+ self.dense = nn.Dense(
+ self.inner_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.dropout = nn.Dropout(rate=self.pooler_dropout)
+ self.out_proj = nn.Dense(
+ self.num_classes,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ def __call__(self, hidden_states: jnp.ndarray, deterministic: bool):
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = jnp.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states, deterministic=deterministic)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
+
+
+class FlaxMBartEncoder(nn.Module):
+ config: MBartConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.max_source_positions = self.config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
+
+ # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 2
+ self.embed_positions = nn.Embed(
+ self.config.max_position_embeddings + self.offset,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.layers = FlaxMBartEncoderLayerCollection(self.config, self.dtype)
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(position_ids + self.offset)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.layernorm_embedding(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ outputs = self.layers(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_states = outputs[0]
+ last_hidden_states = self.layer_norm(last_hidden_states)
+
+ # update the last element in `hidden_states` after applying `layernorm` above
+ hidden_states = None
+ if output_hidden_states:
+ hidden_states = outputs[1]
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
+
+ if not return_dict:
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=last_hidden_states,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class FlaxMBartDecoder(nn.Module):
+ config: MBartConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.max_target_positions = self.config.max_position_embeddings
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
+
+ # MBart is set up so that if padding_idx is specified, the embedding ids are offset by 2
+ # and num_embeddings is adjusted appropriately. Other models don't have this hack.
+ self.offset = 2
+ self.embed_positions = nn.Embed(
+ self.config.max_position_embeddings + self.offset,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.layers = FlaxMBartDecoderLayerCollection(self.config, self.dtype)
+ self.layernorm_embedding = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # embed positions
+ positions = self.embed_positions(position_ids + self.offset)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = self.layernorm_embedding(hidden_states)
+
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ outputs = self.layers(
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_states = outputs[0]
+ last_hidden_states = self.layer_norm(last_hidden_states)
+
+ # update the last element in `hidden_states` after applying `layernorm` above
+ hidden_states = None
+ if output_hidden_states:
+ hidden_states = outputs[1]
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
+
+ if not return_dict:
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=last_hidden_states,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->MBart
+class FlaxMBartModule(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ dtype=self.dtype,
+ )
+
+ self.encoder = FlaxMBartEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+ self.decoder = FlaxMBartDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+class FlaxMBartPreTrainedModel(FlaxPreTrainedModel):
+ config_class = MBartConfig
+ base_model_prefix: str = "model"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: MBartConfig,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ # make sure initialization pass will work for FlaxMBartForSequenceClassificationModule
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = input_ids
+ decoder_attention_mask = jnp.ones_like(input_ids)
+
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartPreTrainedModel.init_cache with Bart->MBart
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+ encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+ `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*,
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
+ cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
+ )
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
+
+ @add_start_docstrings(MBART_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=MBartConfig)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
+
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ @add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=MBartConfig)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
+
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, then the cache is already initialized and a private flag `init_cache` has to
+ # be passed down to ensure the cache is used. It also has to be made sure that the cache is marked as mutable
+ # so that it can be changed by the FlaxMBartAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # prepare decoder inputs
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ if decoder_position_ids is None:
+ batch_size, sequence_length = decoder_input_ids.shape
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+
+@add_start_docstrings(
+ "The bare MBart Model transformer outputting raw hidden-states without any specific head on top.",
+ MBART_START_DOCSTRING,
+)
+class FlaxMBartModel(FlaxMBartPreTrainedModel):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ module_class = FlaxMBartModule
+
+
+append_call_sample_docstring(FlaxMBartModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->MBart
+class FlaxMBartForConditionalGenerationModule(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.model.shared.num_embeddings,
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
+
+ def _get_encoder_module(self):
+ return self.model.encoder
+
+ def _get_decoder_module(self):
+ return self.model.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ position_ids=position_ids,
+ decoder_position_ids=decoder_position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = outputs[0]
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = self.lm_head(hidden_states)
+
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return output
+
+ return FlaxSeq2SeqLMOutput(
+ logits=lm_logits,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The MMBart Model with a language modeling head. Can be used for summarization.", MBART_START_DOCSTRING
+)
+class FlaxMBartForConditionalGeneration(FlaxMBartPreTrainedModel):
+ module_class = FlaxMBartForConditionalGenerationModule
+ dtype: jnp.dtype = jnp.float32
+
+ @add_start_docstrings(MBART_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=MBartConfig)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
+
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+ # If past_key_values are passed, then the cache is already initialized and a private flag `init_cache` has to
+ # be passed down to ensure the cache is used. It also has to be made sure that the cache is marked as mutable
+ # so that it can be changed by the FlaxMBartAttention module.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ outputs = decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+ hidden_states = outputs[0]
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = module.lm_head(hidden_states)
+
+ lm_logits += module.final_logits_bias.astype(self.dtype)
+ return lm_logits, outputs
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ if past_key_values is None:
+ lm_logits, decoder_outputs = outputs
+ else:
+ (lm_logits, decoder_outputs), past = outputs
+
+ if return_dict:
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
+ logits=lm_logits,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+ else:
+ outputs = (lm_logits,) + decoder_outputs[1:]
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+ # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+ # But since the decoder uses a causal mask, those positions are masked anyway.
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
+ if decoder_attention_mask is not None:
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ "decoder_position_ids": position_ids,
+ }
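+ # Rough illustration of the decoder_position_ids prepared above (hypothetical values, not part of the
+ # original file): for a left-padded decoder_attention_mask of [[0, 1, 1, 1]], `cumsum(axis=-1) - 1`
+ # yields [[-1, 0, 1, 2]], so the first real token gets position id 0; the padded slot is excluded by the
+ # attention mask in any case.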
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
+ return model_kwargs
+
+
+FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING = r"""
+ Returns:
+
+ Summarization example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration, MBartConfig
+
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+ >>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen."
+ >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="np")
+
+ >>> # Generate Summary
+ >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5).sequences
+ >>> print(tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False))
+ ```
+
+ Mask filling example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxMBartForConditionalGeneration
+
+ >>> model = FlaxMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+ >>> # de_DE is the language symbol id <LID> for German
+ >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="np")["input_ids"]
+
+ >>> logits = model(input_ids).logits
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
+ >>> probs = logits[0, masked_index].softmax(dim=0)
+ >>> values, predictions = probs.topk(5)
+
+ >>> tokenizer.decode(predictions).split()
+ ```
+"""
+
+overwrite_call_docstring(
+ FlaxMBartForConditionalGeneration, MBART_INPUTS_DOCSTRING + FLAX_MBART_CONDITIONAL_GENERATION_DOCSTRING
+)
+append_replace_return_docstrings(
+ FlaxMBartForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
+)
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForSequenceClassificationModule with Bart->MBart
+class FlaxMBartForSequenceClassificationModule(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32
+ num_labels: Optional[int] = None
+
+ def setup(self):
+ self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
+ self.classification_head = FlaxMBartClassificationHead(
+ config=self.config,
+ inner_dim=self.config.d_model,
+ num_classes=self.num_labels if self.num_labels is not None else self.config.num_labels,
+ pooler_dropout=self.config.classifier_dropout,
+ )
+
+ def _get_encoder_module(self):
+ return self.model.encoder
+
+ def _get_decoder_module(self):
+ return self.model.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ position_ids=position_ids,
+ decoder_position_ids=decoder_position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = outputs[0] # last hidden state
+
+ eos_mask = jnp.where(input_ids == self.config.eos_token_id, 1, 0)
+
+ # The first condition is necessary to overcome jax._src.errors.ConcretizationTypeError during JIT compilation
+ if type(eos_mask) != jax.interpreters.partial_eval.DynamicJaxprTracer:
+ if len(jnp.unique(eos_mask.sum(1))) > 1:
+ raise ValueError("All examples must have the same number of tokens.")
+
+ if any(eos_mask.sum(1) == 0):
+ raise ValueError("There are missing tokens in input_ids")
+
+ # Ensure to keep 1 only for the last <eos> token for each example
+ eos_mask_noised = eos_mask + jnp.arange(eos_mask.shape[1]) * 1e-6
+ eos_mask = jnp.where(eos_mask_noised == eos_mask_noised.max(1).reshape(-1, 1), 1, 0)
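+ # Worked illustration (hypothetical values, not part of the original file): for eos_mask [[0, 1, 0, 1]],
+ # adding arange * 1e-6 gives [[0.0, 1.000001, 0.000002, 1.000003]]; only the last <eos> position equals the
+ # row maximum, so the jnp.where above keeps a single 1 per example.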
+
+ sentence_representation = jnp.einsum("ijk, ij -> ijk", hidden_states, eos_mask).sum(1)
+ logits = self.classification_head(sentence_representation, deterministic=deterministic)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return output
+
+ return FlaxSeq2SeqSequenceClassifierOutput(
+ logits=logits,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MBart model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE
+ tasks.
+ """,
+ MBART_START_DOCSTRING,
+)
+class FlaxMBartForSequenceClassification(FlaxMBartPreTrainedModel):
+ module_class = FlaxMBartForSequenceClassificationModule
+ dtype = jnp.float32
+
+
+append_call_sample_docstring(
+ FlaxMBartForSequenceClassification,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSeq2SeqSequenceClassifierOutput,
+ _CONFIG_FOR_DOC,
+)
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForQuestionAnsweringModule with Bart->MBart
+class FlaxMBartForQuestionAnsweringModule(nn.Module):
+ config: MBartConfig
+ dtype: jnp.dtype = jnp.float32
+ num_labels = 2
+
+ def setup(self):
+ self.model = FlaxMBartModule(config=self.config, dtype=self.dtype)
+ self.qa_outputs = nn.Dense(
+ self.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+
+ def _get_encoder_module(self):
+ return self.model.encoder
+
+ def _get_decoder_module(self):
+ return self.model.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ position_ids=position_ids,
+ decoder_position_ids=decoder_position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = jnp.split(logits, logits.shape[-1], axis=-1)
+ start_logits = start_logits.squeeze(-1)
+ end_logits = end_logits.squeeze(-1)
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[1:]
+ return output
+
+ return FlaxSeq2SeqQuestionAnsweringModelOutput(
+ start_logits=start_logits,
+ end_logits=end_logits,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MBart Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MBART_START_DOCSTRING,
+)
+class FlaxMBartForQuestionAnswering(FlaxMBartPreTrainedModel):
+ module_class = FlaxMBartForQuestionAnsweringModule
+ dtype = jnp.float32
+
+
+append_call_sample_docstring(
+ FlaxMBartForQuestionAnswering,
+ _CHECKPOINT_FOR_DOC,
+ FlaxSeq2SeqQuestionAnsweringModelOutput,
+ _CONFIG_FOR_DOC,
+)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_mbart.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_mbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc23e2c675dbf2922feb94e8f25d62a6d7086801
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_mbart.py
@@ -0,0 +1,2131 @@
+# coding=utf-8
+# Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch MBART model."""
+import copy
+import math
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+ Seq2SeqQuestionAnsweringModelOutput,
+ Seq2SeqSequenceClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_flash_attn_2_available,
+ is_flash_attn_greater_or_equal_2_10,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_mbart import MBartConfig
+
+
+if is_flash_attn_2_available():
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
+_CONFIG_FOR_DOC = "MBartConfig"
+
+# Base model docstring
+_EXPECTED_OUTPUT_SHAPE = [1, 8, 1024]
+
+
+# Copied from transformers.models.llama.modeling_llama._get_unpad_data
+def _get_unpad_data(attention_mask):
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
+ return (
+ indices,
+ cu_seqlens,
+ max_seqlen_in_batch,
+ )
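+# Illustrative sketch (hypothetical values, not part of the original file): for an attention_mask of
+# [[1, 1, 0], [1, 1, 1]], seqlens_in_batch is [2, 3], indices selects the 5 non-padding positions of the
+# flattened mask, and cu_seqlens is [0, 2, 5] -- the cumulative offsets that flash_attn_varlen_func expects.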
+
+
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int):
+ """
+ Shift input ids one token to the right, and wrap the last non pad token (the <LID> token). Note that MBart does
+ not have a single `decoder_start_token_id` in contrast to other Bart-like models.
+ """
+ prev_output_tokens = input_ids.clone()
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ prev_output_tokens.masked_fill_(prev_output_tokens == -100, pad_token_id)
+
+ index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
+ decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze()
+ prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
+ prev_output_tokens[:, 0] = decoder_start_tokens
+
+ return prev_output_tokens
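+# Worked example (hypothetical token ids, not from the original file): for an unpadded row
+# [w1, w2, </s>, <LID>], the last non-pad token is the language id <LID>, so the shifted decoder input
+# becomes [<LID>, w1, w2, </s>] -- the language id is wrapped around to the start instead of using a fixed
+# `decoder_start_token_id`.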
+
+
+# Copied from transformers.models.bart.modeling_bart.BartLearnedPositionalEmbedding with Bart->MBart
+class MBartLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ # MBart is set up so that if padding_idx is specified, the embedding ids are offset by 2
+ # and num_embeddings is adjusted appropriately. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim)
+
+ def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0):
+ """`input_ids' shape is expected to be [bsz x seqlen]."""
+
+ bsz, seq_len = input_ids.shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ ).expand(bsz, -1)
+
+ return super().forward(positions + self.offset)
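+ # Illustrative note (editorial sketch, not part of the original file): with the offset of 2, position ids
+ # 0..seq_len-1 are looked up at embedding rows 2..seq_len+1, which is why the table is created with
+ # `num_embeddings + self.offset` rows in __init__.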
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->MBart
+class MBartAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[MBartConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
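+ # Shape sketch (illustrative, not part of the original file): in cached decoder self-attention, the
+ # incoming past_key_value[0] has shape (bsz, num_heads, t, head_dim) after t generated tokens; the current
+ # step's key/value are concatenated onto it along dim=2 above, so the tuple saved here has length
+ # t + tgt_len in that dimension.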
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+# Copied from transformers.models.bart.modeling_bart.BartFlashAttention2 with Bart->MBart
+class MBartFlashAttention2(MBartAttention):
+ """
+ MBart flash attention module. This module inherits from `MBartAttention` as the weights of the module stays
+ untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
+ flash attention and deal with padding tokens in case the input contains any of them.
+ """
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+
+ # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+ # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+ # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+ def _reshape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # MBartFlashAttention2 attention does not support output_attentions
+ if output_attentions:
+ raise ValueError("MBartFlashAttention2 attention does not support output_attentions")
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, q_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self._reshape(self.q_proj(hidden_states), -1, bsz)
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0].transpose(1, 2)
+ value_states = past_key_value[1].transpose(1, 2)
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._reshape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._reshape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0].transpose(1, 2), key_states], dim=1)
+ value_states = torch.cat([past_key_value[1].transpose(1, 2), value_states], dim=1)
+ else:
+ # self_attention
+ key_states = self._reshape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._reshape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states.transpose(1, 2), value_states.transpose(1, 2))
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+
+ # In PEFT, we usually cast the layer norms to float32 for training stability reasons,
+ # therefore the input hidden states get silently cast to float32. Hence, we need to
+ # cast them back to the correct dtype just to be sure everything works as expected.
+ # This might slow down training & inference, so it is recommended to not cast the LayerNorms
+ # to fp32. (LlamaRMSNorm handles it correctly)
+
+ input_dtype = query_states.dtype
+ if input_dtype == torch.float32:
+ if torch.is_autocast_enabled():
+ target_dtype = torch.get_autocast_gpu_dtype()
+ # Handle the case where the model is quantized
+ elif hasattr(self.config, "_pre_quantization_dtype"):
+ target_dtype = self.config._pre_quantization_dtype
+ else:
+ target_dtype = self.q_proj.weight.dtype
+
+ logger.warning_once(
+ f"The input hidden states seems to be silently casted in float32, this might be related to"
+ f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+ f" {target_dtype}."
+ )
+
+ query_states = query_states.to(target_dtype)
+ key_states = key_states.to(target_dtype)
+ value_states = value_states.to(target_dtype)
+
+ attn_output = self._flash_attention_forward(
+ query_states, key_states, value_states, attention_mask, q_len, dropout=self.dropout
+ )
+
+ attn_output = attn_output.reshape(bsz, q_len, -1)
+ attn_output = self.out_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, attn_weights, past_key_value
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward
+ def _flash_attention_forward(
+ self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
+ ):
+ """
+ Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
+ first unpad the input, then compute the attention scores, and finally pad the output back.
+
+ Args:
+ query_states (`torch.Tensor`):
+ Input query states to be passed to Flash Attention API
+ key_states (`torch.Tensor`):
+ Input key states to be passed to Flash Attention API
+ value_states (`torch.Tensor`):
+ Input value states to be passed to Flash Attention API
+ attention_mask (`torch.Tensor`):
+ The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
+ position of padding tokens and 1 for the position of non-padding tokens.
+ dropout (`float`):
+ Attention dropout
+ softmax_scale (`float`, *optional*):
+ The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
+ """
+ if not self._flash_attn_uses_top_left_mask:
+ causal = self.is_causal
+ else:
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
+ causal = self.is_causal and query_length != 1
+
+ # Contains at least one padding token in the sequence
+ if attention_mask is not None:
+ batch_size = query_states.shape[0]
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
+ query_states, key_states, value_states, attention_mask, query_length
+ )
+
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
+
+ attn_output_unpad = flash_attn_varlen_func(
+ query_states,
+ key_states,
+ value_states,
+ cu_seqlens_q=cu_seqlens_q,
+ cu_seqlens_k=cu_seqlens_k,
+ max_seqlen_q=max_seqlen_in_batch_q,
+ max_seqlen_k=max_seqlen_in_batch_k,
+ dropout_p=dropout,
+ softmax_scale=softmax_scale,
+ causal=causal,
+ )
+
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
+ else:
+ attn_output = flash_attn_func(
+ query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
+ )
+
+ return attn_output
+
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
+
+ key_layer = index_first_axis(
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ value_layer = index_first_axis(
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
+ )
+ if query_length == kv_seq_len:
+ query_layer = index_first_axis(
+ query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
+ )
+ cu_seqlens_q = cu_seqlens_k
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
+ indices_q = indices_k
+ elif query_length == 1:
+ max_seqlen_in_batch_q = 1
+ cu_seqlens_q = torch.arange(
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
+ ) # There is a memcpy here, that is very bad.
+ indices_q = cu_seqlens_q[:-1]
+ query_layer = query_layer.squeeze(1)
+ else:
+ # The -q_len: slice assumes left padding.
+ attention_mask = attention_mask[:, -query_length:]
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
+
+ return (
+ query_layer,
+ key_layer,
+ value_layer,
+ indices_q,
+ (cu_seqlens_q, cu_seqlens_k),
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
+ )
+
+
+MBART_ATTENTION_CLASSES = {
+ "eager": MBartAttention,
+ "flash_attention_2": MBartFlashAttention2,
+}
+
+
+class MBartEncoderLayer(nn.Module):
+ def __init__(self, config: MBartConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = MBART_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
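+        # Guard against fp16 overflow: if the residual sum produced inf/nan values, clamp the
+        # activations just inside the fp16 range so they do not propagate through later layers.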
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class MBartDecoderLayer(nn.Module):
+ def __init__(self, config: MBartConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = MBART_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = MBART_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+            layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+                `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->MBart
+class MBartClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(
+ self,
+ input_dim: int,
+ inner_dim: int,
+ num_classes: int,
+ pooler_dropout: float,
+ ):
+ super().__init__()
+ self.dense = nn.Linear(input_dim, inner_dim)
+ self.dropout = nn.Dropout(p=pooler_dropout)
+ self.out_proj = nn.Linear(inner_dim, num_classes)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.dense(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.out_proj(hidden_states)
+ return hidden_states
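+
+    # Minimal usage sketch (illustrative only; the sizes below are assumptions, not defaults):
+    #   head = MBartClassificationHead(input_dim=1024, inner_dim=1024, num_classes=3, pooler_dropout=0.0)
+    #   logits = head(torch.randn(2, 1024))  # -> tensor of shape (2, 3)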
+
+
+class MBartPreTrainedModel(PreTrainedModel):
+ config_class = MBartConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["MBartDecoderLayer", "MBartAttention"]
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def dummy_inputs(self):
+ pad_token = self.config.pad_token_id
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
+ dummy_inputs = {
+ "attention_mask": input_ids.ne(pad_token),
+ "input_ids": input_ids,
+ }
+ return dummy_inputs
+
+
+MBART_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+    etc.).
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MBartConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MBART_GENERATION_EXAMPLE = r"""
+ Translation example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MBartForConditionalGeneration
+
+ >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
+
+ >>> example_english_phrase = "42 is the answer"
+ >>> inputs = tokenizer(example_english_phrase, return_tensors="pt")
+
+ >>> # Translate
+ >>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5)
+ >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ '42 este răspuns'
+ ```
+
+ Mask filling example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MBartForConditionalGeneration
+
+ >>> model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+    >>> # de_DE is the language symbol id <LID> for German
+    >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
+
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="pt")["input_ids"]
+ >>> logits = model(input_ids).logits
+
+ >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
+ >>> probs = logits[0, masked_index].softmax(dim=0)
+ >>> values, predictions = probs.topk(5)
+
+ >>> tokenizer.decode(predictions).split()
+ ['nett', 'sehr', 'ganz', 'nicht', 'so']
+ ```
+"""
+
+MBART_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
+ varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+            Tuple consisting of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states
+            at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
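+
+# Illustrative note on the `decoder_input_ids` default documented above (a sketch, not executed):
+# when they are not supplied, `MBartModel.forward` builds them with
+# `shift_tokens_right(input_ids, pad_token_id)`, which for MBart rotates the final language-id
+# token to the front, e.g. with hypothetical ids
+#   input_ids         = [w1, w2, w3, </s>, en_XX]
+#   decoder_input_ids = [en_XX, w1, w2, w3, </s>]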
+
+
+class MBartEncoder(MBartPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`MBartEncoderLayer`].
+
+ Args:
+ config: MBartConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = MBartLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ embed_dim,
+ )
+ self.layers = nn.ModuleList([MBartEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+ self.layernorm_embedding = nn.LayerNorm(embed_dim)
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def _backward_compatibility_gradient_checkpointing(self):
+ # Override to not delete the attribute from the config
+ if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
+ self.gradient_checkpointing_enable()
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.shape
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
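+            # only the (batch, seq_len) shape of `input` is used below (for the learned position
+            # embeddings), so slicing away the hidden dimension is enough as a stand-in for token ids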
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input)
+
+ hidden_states = inputs_embeds + embed_pos.to(inputs_embeds.device)
+ hidden_states = self.layernorm_embedding(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ if self._use_flash_attention_2:
+ attention_mask = attention_mask if 0 in attention_mask else None
+ else:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+class MBartDecoder(MBartPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`MBartDecoderLayer`]
+
+ Args:
+ config: MBartConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: MBartConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ if embed_tokens is not None:
+ self.embed_tokens.weight = embed_tokens.weight
+
+ self.embed_positions = MBartLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ )
+ self.layers = nn.ModuleList([MBartDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
+ self.layernorm_embedding = nn.LayerNorm(config.d_model)
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+                Mask to avoid performing cross-attention on padding token indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ if self._use_flash_attention_2:
+ # 2d mask is passed through the layers
+ attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
+ else:
+ # 4d mask is passed through the layers
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ if self._use_flash_attention_2:
+ encoder_attention_mask = encoder_attention_mask if 0 in encoder_attention_mask else None
+ else:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input, past_key_values_length)
+
+ hidden_states = inputs_embeds + positions.to(inputs_embeds.device)
+ hidden_states = self.layernorm_embedding(hidden_states)
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare MBART Model outputting raw hidden-states without any specific head on top.",
+ MBART_START_DOCSTRING,
+)
+class MBartModel(MBartPreTrainedModel):
+ _tied_weights_keys = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"]
+
+ def __init__(self, config: MBartConfig):
+ super().__init__(config)
+
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+
+ self.encoder = MBartEncoder(config, self.shared)
+ self.decoder = MBartDecoder(config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, value):
+ self.shared = value
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.encoder.embed_tokens, self.get_input_embeddings())
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings())
+
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
+ )
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqModelOutput, Tuple[torch.FloatTensor]]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # Unlike other models, MBart automatically creates decoder_input_ids from
+        # input_ids if no decoder_input_ids are provided
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+        # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.",
+ MBART_START_DOCSTRING,
+)
+class MBartForConditionalGeneration(MBartPreTrainedModel):
+ base_model_prefix = "model"
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
+ _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: MBartConfig):
+ super().__init__(config)
+ self.model = MBartModel(config)
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
+ return new_embeddings
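+
+    # e.g. `model.resize_token_embeddings(len(tokenizer) + 1)` grows the embeddings/lm_head and pads
+    # the `final_logits_bias` buffer with zeros (via `_resize_final_logits_bias` below), so newly
+    # added tokens start with a bias of 0.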
+
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
+ old_num_tokens = self.final_logits_bias.shape[-1]
+ if new_num_tokens <= old_num_tokens:
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
+ else:
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
+ self.register_buffer("final_logits_bias", new_bias)
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(MBART_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Seq2SeqLMOutput, Tuple[torch.FloatTensor]]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id)
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
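+
+    # Illustrative sketch: during beam search with beam_idx = tensor([1, 1]), each cached
+    # self-attention key/value of shape (batch * num_beams, heads, seq_len, head_dim) is re-gathered
+    # along its batch dimension so both beams continue from former beam 1, while the cross-attention
+    # entries in layer_past[2:] stay untouched (within a batch item they are identical across beams).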
+
+
+@add_start_docstrings(
+ """
+    MBart model with a sequence classification head on top (a linear layer on top of the pooled output) e.g. for GLUE
+ tasks.
+ """,
+ MBART_START_DOCSTRING,
+)
+class MBartForSequenceClassification(MBartPreTrainedModel):
+ _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight"]
+
+ def __init__(self, config: MBartConfig, **kwargs):
+ super().__init__(config, **kwargs)
+ self.model = MBartModel(config)
+ self.classification_head = MBartClassificationHead(
+ config.d_model,
+ config.d_model,
+ config.num_labels,
+ config.classifier_dropout,
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if labels is not None:
+ use_cache = False
+
+ if input_ids is None and inputs_embeds is not None:
+ raise NotImplementedError(
+ f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = outputs[0] # last hidden state
+
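+        # Pool the sequence by taking the decoder hidden state at each example's final <eos> token;
+        # that vector is used as the sentence representation fed to the classification head.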
+ eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device)
+
+ if len(torch.unique_consecutive(eos_mask.sum(1))) > 1:
+            raise ValueError("All examples must have the same number of <eos> tokens.")
+ sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[
+ :, -1, :
+ ]
+ logits = self.classification_head(sentence_representation)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ if self.config.problem_type is None:
+ if self.config.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.config.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return Seq2SeqSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MBART Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MBART_START_DOCSTRING,
+)
+class MBartForQuestionAnswering(MBartPreTrainedModel):
+ _tied_weights_keys = ["model.encoder.embed_tokens.weight", "model.decoder.embed_tokens.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ config.num_labels = 2
+ self.num_labels = config.num_labels
+
+ self.model = MBartModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ # Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward
+ def forward(
+ self,
+ input_ids: torch.Tensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[List[torch.FloatTensor]] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+            Positions are clamped to the length of the sequence (*sequence_length*). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ if start_positions is not None and end_positions is not None:
+ use_cache = False
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+            # If we are on multi-GPU, split adds a dimension
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+            # sometimes the start/end positions are outside our model inputs; we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (
+ start_logits,
+ end_logits,
+ ) + outputs[1:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return Seq2SeqQuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->MBart
+class MBartDecoderWrapper(MBartPreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
+ used in combination with the [`EncoderDecoderModel`] framework.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.decoder = MBartDecoder(config)
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->MBart, facebook/bart-base->facebook/mbart-large-cc25
+class MBartForCausalLM(MBartPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.model = MBartDecoderWrapper(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, MBartForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+ >>> model = MBartForCausalLM.from_pretrained("facebook/mbart-large-cc25", add_cross_attention=False)
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
+ >>> list(logits.shape) == expected_shape
+ True
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = self.lm_head(outputs[0])
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ # on the first generation step there is no cache, so the full `input_ids` are passed through
+ return {
+ "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
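+ # During beam search the batch dimension of every cached key/value tensor is ordered by beam;
+ # `index_select` along dim 0 with `beam_idx` re-aligns the cache with the surviving beams.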
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_tf_mbart.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_tf_mbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c134b520d4300827f16fee16cacf1c04be6c0f7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/modeling_tf_mbart.py
@@ -0,0 +1,1573 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 MBart model."""
+
+
+from __future__ import annotations
+
+import random
+from typing import Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFSeq2SeqLMOutput,
+ TFSeq2SeqModelOutput,
+)
+
+# Public API
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_mbart import MBartConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25"
+_CONFIG_FOR_DOC = "MBartConfig"
+
+
+LARGE_NEGATIVE = -1e8
+
+
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int):
+ """
+ Shift input ids one token to the right, and wrap the last non-pad token (the `<LID>` language id token)
+ around to the first position. Note that MBart does not have a single `decoder_start_token_id`, in contrast
+ to other Bart-like models.
+ """
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ input_ids = tf.where(
+ input_ids == -100, tf.fill(shape_list(input_ids), tf.cast(pad_token_id, input_ids.dtype)), input_ids
+ )
+ language_id_index = (
+ tf.reduce_sum(tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=input_ids.dtype), axis=-1) - 1
+ )
+ language_id_index = tf.stack(
+ [tf.range(shape_list(input_ids)[0], dtype=input_ids.dtype), language_id_index], axis=-1
+ )
+ languages_ids = tf.gather_nd(input_ids, language_id_index)
+
+ shifted_input_ids = tf.concat([tf.expand_dims(languages_ids, axis=-1), input_ids[:, :-1]], axis=-1)
+
+ return shifted_input_ids
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+ Make the additive causal mask used for uni-directional (decoder) self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
+ mask_cond = tf.range(shape_list(mask)[-1])
+
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartLearnedPositionalEmbedding with Bart->MBart
+class TFMBartLearnedPositionalEmbedding(keras.layers.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
+ # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2
+ # and adjust num_embeddings appropriately. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs)
+
+ def call(
+ self,
+ input_shape: Optional[tf.TensorShape] = None,
+ past_key_values_length: int = 0,
+ position_ids: tf.Tensor | None = None,
+ ):
+ """Input is expected to be of size [bsz x seqlen]."""
+ if position_ids is None:
+ seq_len = input_shape[1]
+ position_ids = tf.range(seq_len, delta=1, name="range")
+ position_ids += past_key_values_length
+
+ offset_dtype = position_ids.dtype if isinstance(position_ids, tf.Tensor) else tf.int32
+ return super().call(position_ids + tf.constant(self.offset, dtype=offset_dtype))
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->MBart
+class TFMBartAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+class TFMBartEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: MBartConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFMBartAttention(
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
+ )
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ layer_head_mask: tf.Tensor,
+ training: Optional[bool] = False,
+ ):
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`tf.Tensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ *(encoder_attention_heads,)*
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, self_attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(hidden_states),
+ shape_list(residual),
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
+ )
+
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return hidden_states, self_attn_weights
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFMBartDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: MBartConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFMBartAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.encoder_attn = TFMBartAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="encoder_attn",
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Tuple[tf.Tensor] | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`tf.Tensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`tf.Tensor`):
+ cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ *(decoder_attention_heads,)*
+ cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module.
+ *(decoder_attention_heads,)*
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return (
+ hidden_states,
+ self_attn_weights,
+ cross_attn_weights,
+ present_key_value,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "encoder_attn", None) is not None:
+ with tf.name_scope(self.encoder_attn.name):
+ self.encoder_attn.build(None)
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFMBartPreTrainedModel(TFPreTrainedModel):
+ config_class = MBartConfig
+ base_model_prefix = "model"
+
+
+MBART_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
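+
+ For instance, assuming `input_ids` and `attention_mask` are already prepared tensors, the following
+ calls are equivalent (illustrative sketch only):
+
+ ```python
+ model(input_ids)
+ model([input_ids, attention_mask])
+ model({"input_ids": input_ids, "attention_mask": attention_mask})
+ ```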
+
+
+
+ Args:
+ config ([`MBartConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MBART_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ MBart uses a specific language id token as the starting token for `decoder_input_ids` generation that
+ varies according to source and target language, *e.g.* 25004 for *en_XX*, and 25003 for *de_DE*. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ If not provided, a default mask that ignores pad tokens will be created. It is not recommended to set this
+ for most use cases.
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tf.FloatTensor`, *optional*):
+ Sequence of hidden states at the output of the last layer of the encoder, of shape
+ `(batch_size, sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+ Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`). Set to `False` during training, `True` during generation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+MBART_GENERATION_EXAMPLE = r"""
+ Translation example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TFMBartForConditionalGeneration
+
+ >>> model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-en-ro")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-en-ro")
+
+ >>> example_english_phrase = "42 is the answer"
+ >>> inputs = tokenizer(example_english_phrase, return_tensors="tf")
+
+ >>> # Translate
+ >>> generated_ids = model.generate(**inputs, num_beams=4, max_length=5)
+ >>> tokenizer.batch_decode(generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+ '42 este răspuns'
+ ```
+
+ Mask filling example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TFMBartForConditionalGeneration
+ >>> import tensorflow as tf
+
+ >>> model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
+
+ >>> # de_DE is the language symbol id <LID> for German
+ >>> TXT = "</s> Meine Freunde sind nett aber sie essen zu viel Kuchen. <mask> de_DE"
+
+ >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors="tf")["input_ids"]
+ >>> logits = model(input_ids).logits
+
+ >>> masked_index = tf.where(input_ids[0] == tokenizer.mask_token_id)[0, 0]
+ >>> probs = tf.nn.softmax(logits[0, masked_index], axis=0)
+ >>> values, predictions = tf.math.top_k(probs, 5)
+
+ >>> tokenizer.decode(predictions).split()
+ ['nett', 'sehr', 'ganz', 'nicht', 'so']
+ ```
+"""
+
+
+@keras_serializable
+class TFMBartEncoder(keras.layers.Layer):
+ config_class = MBartConfig
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TFMBartEncoderLayer`].
+
+ Args:
+ config: MBartConfig
+ """
+
+ def __init__(self, config: MBartConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+
+ self.embed_tokens = embed_tokens
+ self.embed_positions = TFMBartLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.layers = [TFMBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+ self.embed_dim = config.d_model
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ """
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_shape)
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.layernorm_embedding(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # check attention mask and invert
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+ else:
+ attention_mask = None
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(head_mask)[0],
+ len(self.layers),
+ message=(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(head_mask)[0]}."
+ ),
+ )
+
+ # encoder layers
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop): # skip the layer
+ continue
+
+ hidden_states, attn = encoder_layer(
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ )
+
+ if output_attentions:
+ all_attentions += (attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layernorm_embedding", None) is not None:
+ with tf.name_scope(self.layernorm_embedding.name):
+ self.layernorm_embedding.build([None, None, self.embed_dim])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFMBartDecoder(keras.layers.Layer):
+ config_class = MBartConfig
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFMBartDecoderLayer`]
+
+ Args:
+ config: MBartConfig
+ embed_tokens: output embedding
+ """
+
+ def __init__(self, config: MBartConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.embed_tokens = embed_tokens
+ self.layerdrop = config.decoder_layerdrop
+ self.embed_positions = TFMBartLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+ self.layers = [TFMBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
+ self.layernorm_embedding = keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ position_ids: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[
+ TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor, tf.Tensor]
+ ]:
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+ # embed positions
+ if position_ids is None:
+ positions = self.embed_positions(input_shape, past_key_values_length)
+ else:
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ hidden_states = inputs_embeds
+
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
+ else:
+ combined_attention_mask = _expand_mask(
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
+ )
+
+ if attention_mask is not None:
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
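+ # Both masks are additive (0.0 where attention is allowed, LARGE_NEGATIVE where it is blocked),
+ # so summing them combines the causal constraint with the padding mask.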
+
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
+
+ hidden_states = self.layernorm_embedding(hidden_states + positions)
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
+ present_key_values = () if use_cache else None
+
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+
+ if training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ present_key_values += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if encoder_hidden_states is not None:
+ all_cross_attns += (layer_cross_attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
+ else:
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layernorm_embedding", None) is not None:
+ with tf.name_scope(self.layernorm_embedding.name):
+ self.layernorm_embedding.build([None, None, self.config.d_model])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFMBartMainLayer(keras.layers.Layer):
+ config_class = MBartConfig
+
+ def __init__(self, config: MBartConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.shared = keras.layers.Embedding(
+ input_dim=config.vocab_size,
+ output_dim=config.d_model,
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
+ name="model.shared",
+ )
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
+ self.shared.load_weight_prefix = "model.shared"
+
+ self.encoder = TFMBartEncoder(config, self.shared, name="encoder")
+ self.decoder = TFMBartDecoder(config, self.shared, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[TFSeq2SeqModelOutput, tf.Tensor]:
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ use_cache = False
+
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ if decoder_input_ids is None and input_ids is not None:
+ decoder_input_ids = shift_tokens_right(input_ids, self.config.pad_token_id)
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
+ encoder_outputs = TFBaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+
+ decoder_outputs = self.decoder(
+ decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ # The shared/tied weights expect to be in the model base namespace
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
+ # the current one.
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
+ self.shared.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare MBART Model outputting raw hidden-states without any specific head on top.",
+ MBART_START_DOCSTRING,
+)
+class TFMBartModel(TFMBartPreTrainedModel):
+ def __init__(self, config: MBartConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.model = TFMBartMainLayer(config, name="model")
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSeq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[TFSeq2SeqModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
+class BiasLayer(keras.layers.Layer):
+ """
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
+ so all weights have to be registered in a layer.
+ """
+
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
+ super().__init__(name=name, **kwargs)
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
+
+ def call(self, x):
+ return x + self.bias
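+ # Illustrative usage sketch (not part of the original module): the layer simply adds a
+ # broadcastable stored weight to its input, e.g.
+ #   bias_layer = BiasLayer(shape=[1, 8], initializer="zeros", trainable=False, name="final_logits_bias")
+ #   shifted = bias_layer(tf.zeros([2, 5, 8]))  # input plus the (initially zero) bias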
+
+
+@add_start_docstrings(
+ "The MBART Model with a language modeling head. Can be used for summarization, after fine-tuning the pretrained models.",
+ MBART_START_DOCSTRING,
+)
+class TFMBartForConditionalGeneration(TFMBartPreTrainedModel, TFCausalLanguageModelingLoss):
+ _keys_to_ignore_on_load_unexpected = [
+ r"model.encoder.embed_tokens.weight",
+ r"model.decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.model = TFMBartMainLayer(config, name="model")
+ self.use_cache = config.use_cache
+ # final_logits_bias is registered as a buffer in pytorch, so not trainable for the sake of consistency.
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
+ )
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_output_embeddings(self):
+ return self.get_input_embeddings()
+
+ def set_output_embeddings(self, value):
+ self.set_input_embeddings(value)
+
+ def get_bias(self):
+ return {"final_logits_bias": self.bias_layer.bias}
+
+ def set_bias(self, value):
+ # Replaces the existing layers containing bias for correct (de)serialization.
+ vocab_size = value["final_logits_bias"].shape[-1]
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
+ )
+ self.bias_layer.bias.assign(value["final_logits_bias"])
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(MBART_GENERATION_EXAMPLE)
+ def call(
+ self,
+ input_ids: TFModelInputType = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[TFBaseModelOutput] = None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSeq2SeqLMOutput, Tuple[tf.Tensor]]:
+ """
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+
+ if labels is not None:
+ labels = tf.where(
+ labels == self.config.pad_token_id,
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
+ labels,
+ )
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(labels, self.config.pad_token_id)
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
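+ # The LM head is tied to the shared input embeddings: logits come from a transposed matmul
+ # against `self.model.shared.weights`, then the non-trainable `final_logits_bias` is added.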
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
+ lm_logits = self.bias_layer(lm_logits)
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+ return TFSeq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values, # index 1 of decoder outputs
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of decoder outputs
+ decoder_attentions=outputs.decoder_attentions, # index 3 of decoder outputs
+ cross_attentions=outputs.cross_attentions, # index 4 of decoder outputs
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
+ encoder_hidden_states=outputs.encoder_hidden_states, # index 1 of encoder outputs
+ encoder_attentions=outputs.encoder_attentions, # index 2 of encoder outputs
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqLMOutput(
+ logits=output.logits,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ decoder_attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ if decoder_attention_mask is not None: # xla
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
+ elif past_key_values is not None: # no xla + past_key_values
+ decoder_position_ids = past_key_values[0][0].shape[2]
+ else: # no xla + no past_key_values
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
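+ # Summary of the three branches above: under XLA the current position is recovered from the
+ # attention mask via an exclusive cumsum; with a cache but without XLA it is simply the cache
+ # length (a scalar); without a cache the full range [0, seq_len) is used.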
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_position_ids": decoder_position_ids,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: tf.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id)
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+ if getattr(self, "bias_layer", None) is not None:
+ with tf.name_scope(self.bias_layer.name):
+ self.bias_layer.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9da6cb45cb388fe8c89d3fb1137403df75bce77
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart.py
@@ -0,0 +1,337 @@
+# coding=utf-8
+# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import Any, Dict, List, Optional, Tuple
+
+import sentencepiece as spm
+
+from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+SPIECE_UNDERLINE = "▁"
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
+
+
+FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip
+
+
+class MBartTokenizer(PreTrainedTokenizer):
+ """
+ Construct an MBART tokenizer.
+
+ Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on
+ [SentencePiece](https://github.com/google/sentencepiece).
+
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
+ <tokens> <eos>` for target language documents.
+
+ Examples:
+
+ ```python
+ >>> from transformers import MBartTokenizer
+
+ >>> tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
+ >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
+ ```"""
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ prefix_tokens: List[int] = []
+ suffix_tokens: List[int] = []
+
+ def __init__(
+ self,
+ vocab_file,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ tokenizer_file=None,
+ src_lang=None,
+ tgt_lang=None,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ additional_special_tokens=None,
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, normalized=False) if isinstance(mask_token, str) else mask_token
+ )
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(str(vocab_file))
+ self.vocab_file = vocab_file
+
+ # Original fairseq vocab and spm vocab must be "aligned":
+ # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
+ # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
+ # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
+ # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
+
+ # Mimic fairseq token-to-id alignment for the first 4 tokens
+ self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
+
+ # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
+ self.fairseq_offset = 1
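+ # Worked example (illustrative): the spm piece "," has spm id 3, so its tokenizer id is
+ # 3 + self.fairseq_offset = 4, while ids 0-3 stay reserved for "<s>", "<pad>", "</s>" and
+ # "<unk>" via `self.fairseq_tokens_to_ids`.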
+
+ self.sp_model_size = len(self.sp_model)
+ self.lang_code_to_id = {
+ code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
+ }
+ self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
+ self.fairseq_tokens_to_ids[""] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
+
+ self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
+ self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
+ _additional_special_tokens = list(self.lang_code_to_id.keys())
+
+ if additional_special_tokens is not None:
+ # Only add those special tokens if they are not already there.
+ _additional_special_tokens.extend(
+ [t for t in additional_special_tokens if t not in _additional_special_tokens]
+ )
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ tokenizer_file=None,
+ src_lang=src_lang,
+ tgt_lang=tgt_lang,
+ additional_special_tokens=_additional_special_tokens,
+ sp_model_kwargs=self.sp_model_kwargs,
+ **kwargs,
+ )
+
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
+ self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
+ self.tgt_lang = tgt_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sp_model"] = None
+ state["sp_model_proto"] = self.sp_model.serialized_model_proto()
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ # for backward compatibility
+ if not hasattr(self, "sp_model_kwargs"):
+ self.sp_model_kwargs = {}
+
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
+
+ @property
+ def vocab_size(self):
+ return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token
+
+ @property
+ def src_lang(self) -> str:
+ return self._src_lang
+
+ @src_lang.setter
+ def src_lang(self, new_src_lang: str) -> None:
+ self._src_lang = new_src_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ prefix_ones = [1] * len(self.prefix_tokens)
+ suffix_ones = [1] * len(self.suffix_tokens)
+ if token_ids_1 is None:
+ return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
+ return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An MBART sequence has the following format, where `X` represents the sequence:
+
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
+
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
+ separator.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
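+ # Illustrative result (assuming src_lang="en_XX"): encoding a single sentence yields
+ # `token_ids + [eos_token_id, en_XX_id]`; no BOS prefix is ever added.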
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def _build_translation_inputs(
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
+ ):
+ """Used by translation pipeline, to prepare inputs for the generate function"""
+ if src_lang is None or tgt_lang is None:
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
+ self.src_lang = src_lang
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
+ inputs["forced_bos_token_id"] = tgt_lang_id
+ return inputs
+
+ def get_vocab(self):
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ def _tokenize(self, text: str) -> List[str]:
+ return self.sp_model.encode(text, out_type=str)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if token in self.fairseq_tokens_to_ids:
+ return self.fairseq_tokens_to_ids[token]
+ spm_id = self.sp_model.PieceToId(token)
+
+ # Need to return unknown token if the SP model returned 0
+ return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if index in self.fairseq_ids_to_tokens:
+ return self.fairseq_ids_to_tokens[index]
+ return self.sp_model.IdToPiece(index - self.fairseq_offset)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (strings for sub-words) in a single string."""
+ out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+ elif not os.path.isfile(self.vocab_file):
+ with open(out_vocab_file, "wb") as fi:
+ content_spiece_model = self.sp_model.serialized_model_proto()
+ fi.write(content_spiece_model)
+
+ return (out_vocab_file,)
+
+ def prepare_seq2seq_batch(
+ self,
+ src_texts: List[str],
+ src_lang: str = "en_XX",
+ tgt_texts: Optional[List[str]] = None,
+ tgt_lang: str = "ro_RO",
+ **kwargs,
+ ) -> BatchEncoding:
+ self.src_lang = src_lang
+ self.tgt_lang = tgt_lang
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
+
+ def _switch_to_input_mode(self):
+ return self.set_src_lang_special_tokens(self.src_lang)
+
+ def _switch_to_target_mode(self):
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
+
+ def set_src_lang_special_tokens(self, src_lang) -> None:
+ """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
+ self.cur_lang_code = self.lang_code_to_id[src_lang]
+ self.prefix_tokens = []
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
+
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
+ """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
+ self.cur_lang_code = self.lang_code_to_id[lang]
+ self.prefix_tokens = []
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart_fast.py b/venv/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..71107bf0cdaf47e132e4d4985503a8bb4ab732de
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mbart/tokenization_mbart_fast.py
@@ -0,0 +1,270 @@
+# coding=utf-8
+# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+from shutil import copyfile
+from typing import List, Optional, Tuple
+
+from tokenizers import processors
+
+from ...tokenization_utils import AddedToken, BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import is_sentencepiece_available, logging
+
+
+if is_sentencepiece_available():
+ from .tokenization_mbart import MBartTokenizer
+else:
+ MBartTokenizer = None
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
+
+
+FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"] # fmt: skip
+
+
+class MBartTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" MBART tokenizer (backed by HuggingFace's *tokenizers* library). Based on
+ [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models).
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ The tokenization method is `<tokens> <eos> <language code>` for source language documents, and `<language code>
+ <tokens> <eos>` for target language documents.
+
+ Examples:
+
+ ```python
+ >>> from transformers import MBartTokenizerFast
+
+ >>> tokenizer = MBartTokenizerFast.from_pretrained(
+ ... "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
+ ... )
+ >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria"
+ >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria"
+ >>> inputs = tokenizer(example_english_phrase, text_target=expected_translation_romanian, return_tensors="pt")
+ ```"""
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = MBartTokenizer
+
+ prefix_tokens: List[int] = []
+ suffix_tokens: List[int] = []
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ src_lang=None,
+ tgt_lang=None,
+ additional_special_tokens=None,
+ **kwargs,
+ ):
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ _additional_special_tokens = FAIRSEQ_LANGUAGE_CODES.copy()
+
+ if additional_special_tokens is not None:
+ # Only add those special tokens if they are not already there.
+ _additional_special_tokens.extend(
+ [t for t in additional_special_tokens if t not in _additional_special_tokens]
+ )
+
+ super().__init__(
+ vocab_file=vocab_file,
+ tokenizer_file=tokenizer_file,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ src_lang=src_lang,
+ tgt_lang=tgt_lang,
+ additional_special_tokens=_additional_special_tokens,
+ **kwargs,
+ )
+
+ self.vocab_file = vocab_file
+ self.lang_code_to_id = {
+ lang_code: self.convert_tokens_to_ids(lang_code) for lang_code in FAIRSEQ_LANGUAGE_CODES
+ }
+
+ self._src_lang = src_lang if src_lang is not None else "en_XX"
+ self.cur_lang_code = self.convert_tokens_to_ids(self._src_lang)
+ self.tgt_lang = tgt_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ @property
+ def can_save_slow_tokenizer(self) -> bool:
+ return os.path.isfile(self.vocab_file) if self.vocab_file else False
+
+ @property
+ def src_lang(self) -> str:
+ return self._src_lang
+
+ @src_lang.setter
+ def src_lang(self, new_src_lang: str) -> None:
+ self._src_lang = new_src_lang
+ self.set_src_lang_special_tokens(self._src_lang)
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. The special tokens depend on calling set_lang.
+
+ An MBART sequence has the following format, where `X` represents the sequence:
+
+ - `input_ids` (for encoder) `X [eos, src_lang_code]`
+ - `decoder_input_ids`: (for decoder) `X [eos, tgt_lang_code]`
+
+ BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a
+ separator.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return self.prefix_tokens + token_ids_0 + self.suffix_tokens
+ # We don't expect to process pairs, but leave the pair logic for API consistency
+ return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. mBART does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+
+ """
+
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def _build_translation_inputs(
+ self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
+ ):
+ """Used by translation pipeline, to prepare inputs for the generate function"""
+ if src_lang is None or tgt_lang is None:
+ raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
+ self.src_lang = src_lang
+ inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
+ tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
+ inputs["forced_bos_token_id"] = tgt_lang_id
+ return inputs
+
+ def prepare_seq2seq_batch(
+ self,
+ src_texts: List[str],
+ src_lang: str = "en_XX",
+ tgt_texts: Optional[List[str]] = None,
+ tgt_lang: str = "ro_RO",
+ **kwargs,
+ ) -> BatchEncoding:
+ self.src_lang = src_lang
+ self.tgt_lang = tgt_lang
+ return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
+
+ def _switch_to_input_mode(self):
+ return self.set_src_lang_special_tokens(self.src_lang)
+
+ def _switch_to_target_mode(self):
+ return self.set_tgt_lang_special_tokens(self.tgt_lang)
+
+ def set_src_lang_special_tokens(self, src_lang) -> None:
+ """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
+ self.cur_lang_code = self.convert_tokens_to_ids(src_lang)
+ self.prefix_tokens = []
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
+
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
+
+ self._tokenizer.post_processor = processors.TemplateProcessing(
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
+ )
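+ # e.g. with src_lang="en_XX" the template above becomes "$A </s> en_XX" for single sequences
+ # and "$A $B </s> en_XX" for pairs (illustrative; the suffix tokens are [eos, lang_code]).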
+
+ def set_tgt_lang_special_tokens(self, lang: str) -> None:
+ """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code]."""
+ self.cur_lang_code = self.convert_tokens_to_ids(lang)
+ self.prefix_tokens = []
+ self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
+
+ prefix_tokens_str = self.convert_ids_to_tokens(self.prefix_tokens)
+ suffix_tokens_str = self.convert_ids_to_tokens(self.suffix_tokens)
+
+ self._tokenizer.post_processor = processors.TemplateProcessing(
+ single=prefix_tokens_str + ["$A"] + suffix_tokens_str,
+ pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str,
+ special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str, self.prefix_tokens + self.suffix_tokens)),
+ )
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not self.can_save_slow_tokenizer:
+ raise ValueError(
+ "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
+ "tokenizer."
+ )
+
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory.")
+ return
+ out_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+
+ if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
+ copyfile(self.vocab_file, out_vocab_file)
+
+ return (out_vocab_file,)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/tapas/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1afab325420f7cef5170e549a49f2ead66d322b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tapas/__init__.py
@@ -0,0 +1,95 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
+ "tokenization_tapas": ["TapasTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tapas"] = [
+ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TapasForMaskedLM",
+ "TapasForQuestionAnswering",
+ "TapasForSequenceClassification",
+ "TapasModel",
+ "TapasPreTrainedModel",
+ "load_tf_weights_in_tapas",
+ ]
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_tapas"] = [
+ "TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFTapasForMaskedLM",
+ "TFTapasForQuestionAnswering",
+ "TFTapasForSequenceClassification",
+ "TFTapasModel",
+ "TFTapasPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
+ from .tokenization_tapas import TapasTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tapas import (
+ TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TapasForMaskedLM,
+ TapasForQuestionAnswering,
+ TapasForSequenceClassification,
+ TapasModel,
+ TapasPreTrainedModel,
+ load_tf_weights_in_tapas,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_tapas import (
+ TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFTapasForMaskedLM,
+ TFTapasForQuestionAnswering,
+ TFTapasForSequenceClassification,
+ TFTapasModel,
+ TFTapasPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/configuration_tapas.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/configuration_tapas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..907135a419413c64fd3e6ffe056d1f9951818d5b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/configuration_tapas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/convert_tapas_original_tf_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/convert_tapas_original_tf_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c8ec1d31e15019df2747d34231e46be56de64905
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/convert_tapas_original_tf_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/modeling_tapas.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/modeling_tapas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0f33ded759d6ba0ac9a4fe6b6bc984927b26e0ea
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/modeling_tapas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/modeling_tf_tapas.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/modeling_tf_tapas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..84c8fb328448e3b9bae96ecfbac5818cb8c6ef04
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/modeling_tf_tapas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/configuration_tapas.py b/venv/lib/python3.10/site-packages/transformers/models/tapas/configuration_tapas.py
new file mode 100644
index 0000000000000000000000000000000000000000..b448afd002206240d7ffd0d8c58d2c5a8c5bee39
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tapas/configuration_tapas.py
@@ -0,0 +1,228 @@
+# coding=utf-8
+# Copyright 2020 Google Research and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+TAPAS configuration. Based on the BERT configuration with added parameters.
+
+Hyperparameters are taken from run_task_main.py and hparam_utils.py of the original implementation. URLs:
+
+- https://github.com/google-research/tapas/blob/master/tapas/run_task_main.py
+- https://github.com/google-research/tapas/blob/master/tapas/utils/hparam_utils.py
+
+"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ..deprecated._archive_maps import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class TapasConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`TapasModel`]. It is used to instantiate a TAPAS
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the TAPAS
+ [google/tapas-base-finetuned-sqa](https://huggingface.co/google/tapas-base-finetuned-sqa) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Hyperparameters additional to BERT are taken from run_task_main.py and hparam_utils.py of the original
+ implementation. Original implementation available at https://github.com/google-research/tapas/tree/master.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30522):
+ Vocabulary size of the TAPAS model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`TapasModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"swish"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ type_vocab_sizes (`List[int]`, *optional*, defaults to `[3, 256, 256, 2, 256, 256, 10]`):
+ The vocabulary sizes of the `token_type_ids` passed when calling [`TapasModel`].
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ positive_label_weight (`float`, *optional*, defaults to 10.0):
+ Weight for positive labels.
+ num_aggregation_labels (`int`, *optional*, defaults to 0):
+ The number of aggregation operators to predict.
+ aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
+ Importance weight for the aggregation loss.
+ use_answer_as_supervision (`bool`, *optional*):
+ Whether to use the answer as the only supervision for aggregation examples.
+ answer_loss_importance (`float`, *optional*, defaults to 1.0):
+ Importance weight for the regression loss.
+ use_normalized_answer_loss (`bool`, *optional*, defaults to `False`):
+ Whether to normalize the answer loss by the maximum of the predicted and expected value.
+ huber_loss_delta (`float`, *optional*):
+ Delta parameter used to calculate the regression loss.
+ temperature (`float`, *optional*, defaults to 1.0):
+ Value used to control (OR change) the skewness of cell logits probabilities.
+ aggregation_temperature (`float`, *optional*, defaults to 1.0):
+ Scales aggregation logits to control the skewness of probabilities.
+ use_gumbel_for_cells (`bool`, *optional*, defaults to `False`):
+ Whether to apply Gumbel-Softmax to cell selection.
+ use_gumbel_for_aggregation (`bool`, *optional*, defaults to `False`):
+ Whether to apply Gumbel-Softmax to aggregation selection.
+ average_approximation_function (`string`, *optional*, defaults to `"ratio"`):
+ Method to calculate the expected average of cells in the weak supervision case. One of `"ratio"`,
+ `"first_order"` or `"second_order"`.
+ cell_selection_preference (`float`, *optional*):
+ Preference for cell selection in ambiguous cases. Only applicable in case of weak supervision for
+ aggregation (WTQ, WikiSQL). If the total mass of the aggregation probabilities (excluding the "NONE"
+ operator) is higher than this hyperparameter, then aggregation is predicted for an example.
+ answer_loss_cutoff (`float`, *optional*):
+ Ignore examples with answer loss larger than cutoff.
+ max_num_rows (`int`, *optional*, defaults to 64):
+ Maximum number of rows.
+ max_num_columns (`int`, *optional*, defaults to 32):
+ Maximum number of columns.
+ average_logits_per_cell (`bool`, *optional*, defaults to `False`):
+ Whether to average logits per cell.
+ select_one_column (`bool`, *optional*, defaults to `True`):
+ Whether to constrain the model to only select cells from a single column.
+ allow_empty_column_selection (`bool`, *optional*, defaults to `False`):
+ Whether to allow not to select any column.
+ init_cell_selection_weights_to_zero (`bool`, *optional*, defaults to `False`):
+ Whether to initialize cell selection weights to 0 so that the initial probabilities are 50%.
+ reset_position_index_per_cell (`bool`, *optional*, defaults to `True`):
+ Whether to restart position indexes at every cell (i.e. use relative position embeddings).
+ disable_per_token_loss (`bool`, *optional*, defaults to `False`):
+ Whether to disable any (strong or weak) supervision on cells.
+ aggregation_labels (`Dict[int, label]`, *optional*):
+ The aggregation labels used to aggregate the results. For example, the WTQ models have the following
+ aggregation labels: `{0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}`
+ no_aggregation_label_index (`int`, *optional*):
+ If the aggregation labels are defined and one of these labels represents "No aggregation", this should be
+ set to its index. For example, the WTQ models have the "NONE" aggregation label at index 0, so that value
+ should be set to 0 for these models.
+
+
+ Example:
+
+ ```python
+ >>> from transformers import TapasModel, TapasConfig
+
+ >>> # Initializing a default (SQA) Tapas configuration
+ >>> configuration = TapasConfig()
+ >>> # Initializing a model from the configuration
+ >>> model = TapasModel(configuration)
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "tapas"
+
+ def __init__(
+ self,
+ vocab_size=30522,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=1024,
+ type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ pad_token_id=0,
+ positive_label_weight=10.0,
+ num_aggregation_labels=0,
+ aggregation_loss_weight=1.0,
+ use_answer_as_supervision=None,
+ answer_loss_importance=1.0,
+ use_normalized_answer_loss=False,
+ huber_loss_delta=None,
+ temperature=1.0,
+ aggregation_temperature=1.0,
+ use_gumbel_for_cells=False,
+ use_gumbel_for_aggregation=False,
+ average_approximation_function="ratio",
+ cell_selection_preference=None,
+ answer_loss_cutoff=None,
+ max_num_rows=64,
+ max_num_columns=32,
+ average_logits_per_cell=False,
+ select_one_column=True,
+ allow_empty_column_selection=False,
+ init_cell_selection_weights_to_zero=False,
+ reset_position_index_per_cell=True,
+ disable_per_token_loss=False,
+ aggregation_labels=None,
+ no_aggregation_label_index=None,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+ # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.type_vocab_sizes = type_vocab_sizes
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+
+ # Fine-tuning task hyperparameters
+ self.positive_label_weight = positive_label_weight
+ self.num_aggregation_labels = num_aggregation_labels
+ self.aggregation_loss_weight = aggregation_loss_weight
+ self.use_answer_as_supervision = use_answer_as_supervision
+ self.answer_loss_importance = answer_loss_importance
+ self.use_normalized_answer_loss = use_normalized_answer_loss
+ self.huber_loss_delta = huber_loss_delta
+ self.temperature = temperature
+ self.aggregation_temperature = aggregation_temperature
+ self.use_gumbel_for_cells = use_gumbel_for_cells
+ self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
+ self.average_approximation_function = average_approximation_function
+ self.cell_selection_preference = cell_selection_preference
+ self.answer_loss_cutoff = answer_loss_cutoff
+ self.max_num_rows = max_num_rows
+ self.max_num_columns = max_num_columns
+ self.average_logits_per_cell = average_logits_per_cell
+ self.select_one_column = select_one_column
+ self.allow_empty_column_selection = allow_empty_column_selection
+ self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
+ self.reset_position_index_per_cell = reset_position_index_per_cell
+ self.disable_per_token_loss = disable_per_token_loss
+
+ # Aggregation hyperparameters
+ self.aggregation_labels = aggregation_labels
+ self.no_aggregation_label_index = no_aggregation_label_index
+
+ if isinstance(self.aggregation_labels, dict):
+ self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
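+ # Illustrative usage (a sketch, not part of the original file): a weakly supervised WTQ-style
+ # setup corresponds to TapasConfig(num_aggregation_labels=4, use_answer_as_supervision=True,
+ # aggregation_labels={0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}, no_aggregation_label_index=0).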
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..2772a7f126ef9ad350837e993e264c70e68ae3fb
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py
@@ -0,0 +1,138 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert TAPAS checkpoint."""
+
+
+import argparse
+
+from transformers import (
+ TapasConfig,
+ TapasForMaskedLM,
+ TapasForQuestionAnswering,
+ TapasForSequenceClassification,
+ TapasModel,
+ TapasTokenizer,
+ load_tf_weights_in_tapas,
+)
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+
+
+def convert_tf_checkpoint_to_pytorch(
+ task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path
+):
+ # Initialise PyTorch model.
+ # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
+ # TapasConfig to False.
+
+ # initialize configuration from json file
+ config = TapasConfig.from_json_file(tapas_config_file)
+ # set absolute/relative position embeddings parameter
+ config.reset_position_index_per_cell = reset_position_index_per_cell
+
+ # set remaining parameters of TapasConfig as well as the model based on the task
+ if task == "SQA":
+ model = TapasForQuestionAnswering(config=config)
+ elif task == "WTQ":
+ # run_task_main.py hparams
+ config.num_aggregation_labels = 4
+ config.use_answer_as_supervision = True
+ # hparam_utils.py hparams
+ config.answer_loss_cutoff = 0.664694
+ config.cell_selection_preference = 0.207951
+ config.huber_loss_delta = 0.121194
+ config.init_cell_selection_weights_to_zero = True
+ config.select_one_column = True
+ config.allow_empty_column_selection = False
+ config.temperature = 0.0352513
+
+ model = TapasForQuestionAnswering(config=config)
+ elif task == "WIKISQL_SUPERVISED":
+ # run_task_main.py hparams
+ config.num_aggregation_labels = 4
+ config.use_answer_as_supervision = False
+ # hparam_utils.py hparams
+ config.answer_loss_cutoff = 36.4519
+ config.cell_selection_preference = 0.903421
+ config.huber_loss_delta = 222.088
+ config.init_cell_selection_weights_to_zero = True
+ config.select_one_column = True
+ config.allow_empty_column_selection = True
+ config.temperature = 0.763141
+
+ model = TapasForQuestionAnswering(config=config)
+ elif task == "TABFACT":
+ model = TapasForSequenceClassification(config=config)
+ elif task == "MLM":
+ model = TapasForMaskedLM(config=config)
+ elif task == "INTERMEDIATE_PRETRAINING":
+ model = TapasModel(config=config)
+ else:
+ raise ValueError(f"Task {task} not supported.")
+
+ print(f"Building PyTorch model from configuration: {config}")
+ # Load weights from tf checkpoint
+ load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
+
+ # Save pytorch-model (weights and configuration)
+ print(f"Save PyTorch model to {pytorch_dump_path}")
+ model.save_pretrained(pytorch_dump_path)
+
+ # Save tokenizer files
+ print(f"Save tokenizer files to {pytorch_dump_path}")
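+    # Note: the vocab file is assumed to sit next to the checkpoint; slicing off the trailing
+    # "model.ckpt" (10 characters) from the checkpoint path yields its directory.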
+ tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512)
+ tokenizer.save_pretrained(pytorch_dump_path)
+
+ print("Used relative position embeddings:", model.config.reset_position_index_per_cell)
+
+
+if __name__ == "__main__":
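+    # Example invocation (the paths below are illustrative placeholders, not shipped files):
+    #   python convert_tapas_original_tf_checkpoint_to_pytorch.py \
+    #     --task WTQ \
+    #     --reset_position_index_per_cell \
+    #     --tf_checkpoint_path /path/to/model.ckpt \
+    #     --tapas_config_file /path/to/tapas_config.json \
+    #     --pytorch_dump_path /path/to/pytorch_dump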
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
+ )
+ parser.add_argument(
+ "--reset_position_index_per_cell",
+ default=False,
+ action="store_true",
+        help="Whether to use relative position embeddings or not. Defaults to False.",
+ )
+ parser.add_argument(
+        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
+ )
+ parser.add_argument(
+ "--tapas_config_file",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "The config json file corresponding to the pre-trained TAPAS model. \n"
+ "This specifies the model architecture."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_tf_checkpoint_to_pytorch(
+ args.task,
+ args.reset_position_index_per_cell,
+ args.tf_checkpoint_path,
+ args.tapas_config_file,
+ args.pytorch_dump_path,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/modeling_tapas.py b/venv/lib/python3.10/site-packages/transformers/models/tapas/modeling_tapas.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2ce847926b38fef2dc8c309fa4ee5525fde5918
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tapas/modeling_tapas.py
@@ -0,0 +1,2388 @@
+# coding=utf-8
+# Copyright 2020 Google Research and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch TAPAS model."""
+
+
+import enum
+import math
+import os
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import (
+ apply_chunking_to_forward,
+ find_pruneable_heads_and_indices,
+ is_torch_greater_or_equal_than_1_12,
+ prune_linear_layer,
+)
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_tapas import TapasConfig
+
+
+logger = logging.get_logger(__name__)
+
+if not is_torch_greater_or_equal_than_1_12:
+ logger.warning(
+ f"You are using torch=={torch.__version__}, but torch>=1.12.0 is required to use "
+ "TapasModel. Please upgrade torch."
+ )
+
+_CONFIG_FOR_DOC = "TapasConfig"
+_CHECKPOINT_FOR_DOC = "google/tapas-base"
+
+
+from ..deprecated._archive_maps import TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+EPSILON_ZERO_DIVISION = 1e-10
+CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0
+
+
+@dataclass
+class TableQuestionAnsweringOutput(ModelOutput):
+ """
+ Output type of [`TapasForQuestionAnswering`].
+
+ Args:
+        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale`) are provided):
+ Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
+ semi-supervised regression loss and (optionally) supervised loss for aggregations.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Prediction scores of the cell selection head, for every token.
+ logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
+ Prediction scores of the aggregation head, for every aggregation operator.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
+ shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer
+ plus the initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ logits_aggregation: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+def load_tf_weights_in_tapas(model, config, tf_checkpoint_path):
+ """
+    Load tf checkpoints in a PyTorch model. This is an adaptation of load_tf_weights_in_bert:
+
+ - add cell selection and aggregation heads
+ - take into account additional token type embedding layers
+ """
+ try:
+ import re
+
+ import numpy as np
+ import tensorflow as tf
+ except ImportError:
+ logger.error(
+            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
+ "https://www.tensorflow.org/install/ for installation instructions."
+ )
+ raise
+ tf_path = os.path.abspath(tf_checkpoint_path)
+ logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
+ # Load weights from TF model
+ init_vars = tf.train.list_variables(tf_path)
+ names = []
+ arrays = []
+ for name, shape in init_vars:
+ logger.info(f"Loading TF weight {name} with shape {shape}")
+ array = tf.train.load_variable(tf_path, name)
+ names.append(name)
+ arrays.append(array)
+
+ for name, array in zip(names, arrays):
+ name = name.split("/")
+ # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v
+ # which are not required for using pretrained model
+ if any(
+ n
+ in [
+ "adam_v",
+ "adam_m",
+ "AdamWeightDecayOptimizer",
+ "AdamWeightDecayOptimizer_1",
+ "global_step",
+ "seq_relationship",
+ ]
+ for n in name
+ ):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights
+ # since these are not used for classification
+ if isinstance(model, TapasForSequenceClassification):
+ if any(n in ["output_bias", "output_weights"] for n in name):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls
+ # since this model does not have MLM and NSP heads
+ if isinstance(model, TapasModel):
+ if any(n in ["output_bias", "output_weights", "output_bias_cls", "output_weights_cls"] for n in name):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ # in case the model is TapasForMaskedLM, we skip the pooler
+ if isinstance(model, TapasForMaskedLM):
+ if any(n in ["pooler"] for n in name):
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ # if first scope name starts with "bert", change it to "tapas"
+ if name[0] == "bert":
+ name[0] = "tapas"
+ pointer = model
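+        # Walk the PyTorch module tree following the TF variable scope names, translating
+        # TF-specific names (kernel/gamma/beta and the task-specific heads) to module attributes.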
+ for m_name in name:
+ if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
+ scope_names = re.split(r"_(\d+)", m_name)
+ else:
+ scope_names = [m_name]
+ if scope_names[0] == "kernel" or scope_names[0] == "gamma":
+ pointer = getattr(pointer, "weight")
+ elif scope_names[0] == "beta":
+ pointer = getattr(pointer, "bias")
+ # cell selection heads
+ elif scope_names[0] == "output_bias":
+ if not isinstance(model, TapasForMaskedLM):
+ pointer = getattr(pointer, "output_bias")
+ else:
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights":
+ pointer = getattr(pointer, "output_weights")
+ elif scope_names[0] == "column_output_bias":
+ pointer = getattr(pointer, "column_output_bias")
+ elif scope_names[0] == "column_output_weights":
+ pointer = getattr(pointer, "column_output_weights")
+ # aggregation head
+ elif scope_names[0] == "output_bias_agg":
+ pointer = getattr(pointer, "aggregation_classifier")
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights_agg":
+ pointer = getattr(pointer, "aggregation_classifier")
+ pointer = getattr(pointer, "weight")
+ # classification head
+ elif scope_names[0] == "output_bias_cls":
+ pointer = getattr(pointer, "classifier")
+ pointer = getattr(pointer, "bias")
+ elif scope_names[0] == "output_weights_cls":
+ pointer = getattr(pointer, "classifier")
+ pointer = getattr(pointer, "weight")
+ else:
+ try:
+ pointer = getattr(pointer, scope_names[0])
+ except AttributeError:
+ logger.info(f"Skipping {'/'.join(name)}")
+ continue
+ if len(scope_names) >= 2:
+ num = int(scope_names[1])
+ pointer = pointer[num]
+ if m_name[-11:] == "_embeddings":
+ pointer = getattr(pointer, "weight")
+ elif m_name[-13:] in [f"_embeddings_{i}" for i in range(7)]:
+ pointer = getattr(pointer, "weight")
+ elif m_name == "kernel":
+ array = np.transpose(array)
+ try:
+ if pointer.shape != array.shape:
+ raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched")
+        except ValueError as e:
+ e.args += (pointer.shape, array.shape)
+ raise
+ logger.info(f"Initialize PyTorch weight {name}")
+ # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be
+ # scalar => should first be converted to numpy arrays)
+ if np.isscalar(array):
+ array = np.array(array)
+ pointer.data = torch.from_numpy(array)
+ return model
+
+
+class TapasEmbeddings(nn.Module):
+ """
+ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of
+ additional token type embeddings to encode tabular structure.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ # we do not include config.disabled_features and config.disable_position_embeddings from the original implementation
+ # word embeddings
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
+ # position embeddings
+ self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
+ # token type embeddings
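+        # one embedding table per entry in config.type_vocab_sizes (7 by default), each encoding a
+        # different structural feature of the table (segment, column, row, previous labels, ranks, ...)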
+ for i, type_vocab_sizes in enumerate(config.type_vocab_sizes):
+ name = f"token_type_embeddings_{i}"
+ setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size))
+
+ self.number_of_token_type_embeddings = len(config.type_vocab_sizes)
+
+ # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
+ # any TensorFlow checkpoint file
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ self.config = config
+
+ def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None):
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if position_ids is None:
+ # create absolute position embeddings
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0).expand(input_shape)
+ # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings
+ if self.config.reset_position_index_per_cell:
+ # shape (batch_size, seq_len)
+ col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)
+ # shape (batch_size, seq_len)
+ row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)
+ # shape (batch_size, seq_len)
+ full_index = ProductIndexMap(col_index, row_index)
+ # shape (max_rows * max_columns,). First absolute position for every cell
+ first_position_per_segment = reduce_min(position_ids, full_index)[0]
+                # shape (batch_size, seq_len). First absolute position of the cell for every token
+ first_position = gather(first_position_per_segment, full_index)
+ # shape (1, seq_len)
+ position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
+ position_ids = torch.min(
+ torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position
+ )
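+                # The resulting indices restart from 0 at the first token of every table cell and are
+                # clipped to max_position_embeddings - 1, giving cell-relative ("reset") positions.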
+
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(
+                (*input_shape, self.number_of_token_type_embeddings), dtype=torch.long, device=device
+ )
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+
+ position_embeddings = self.position_embeddings(position_ids)
+
+ embeddings = inputs_embeds + position_embeddings
+
+ for i in range(self.number_of_token_type_embeddings):
+ name = f"token_type_embeddings_{i}"
+ embeddings += getattr(self, name)(token_type_ids[:, :, i])
+
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+
+class TapasSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
+ f"heads {config.num_attention_heads}"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+ self.is_decoder = config.is_decoder
+
+ def transpose_for_scores(self, x):
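+        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)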
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=None,
+ output_attentions=False,
+ ):
+ mixed_query_layer = self.query(hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
+ value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+ key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
+ value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
+
+ query_layer = self.transpose_for_scores(mixed_query_layer)
+
+ if self.is_decoder:
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+ if attention_mask is not None:
+            # Apply the attention mask (precomputed for all layers in TapasModel forward() function)
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ context_layer = torch.matmul(attention_probs, value_layer)
+
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
+ context_layer = context_layer.view(*new_context_layer_shape)
+
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertSelfOutput
+class TapasSelfOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class TapasAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.self = TapasSelfAttention(config)
+ self.output = TapasSelfOutput(config)
+ self.pruned_heads = set()
+
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
+ )
+
+ # Prune linear layers
+ self.self.query = prune_linear_layer(self.self.query, index)
+ self.self.key = prune_linear_layer(self.self.key, index)
+ self.self.value = prune_linear_layer(self.self.value, index)
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
+
+ # Update hyper params and store pruned heads
+ self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
+ self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ # Copied from transformers.models.bert.modeling_bert.BertAttention.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ self_outputs = self.self(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_value,
+ output_attentions,
+ )
+ attention_output = self.output(self_outputs[0], hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class TapasIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class TapasOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class TapasLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
+ self.seq_len_dim = 1
+ self.attention = TapasAttention(config)
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = TapasAttention(config)
+ self.intermediate = TapasIntermediate(config)
+ self.output = TapasOutput(config)
+
+ # Copied from transformers.models.bert.modeling_bert.BertLayer.forward
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions=output_attentions,
+ past_key_value=self_attn_past_key_value,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ attention_output,
+ attention_mask,
+ head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ cross_attn_past_key_value,
+ output_attentions,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ layer_output = apply_chunking_to_forward(
+ self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
+ )
+ outputs = (layer_output,) + outputs
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk
+ def feed_forward_chunk(self, attention_output):
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ return layer_output
+
+
+class TapasEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.layer = nn.ModuleList([TapasLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=False,
+ output_hidden_states=False,
+ return_dict=True,
+ ):
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_head_mask = head_mask[i] if head_mask is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ layer_module.__call__,
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_values,
+ output_attentions,
+ )
+ else:
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ layer_head_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ past_key_values,
+ output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class TapasPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+# Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Tapas
+class TapasPredictionHeadTransform(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.transform_act_fn = config.hidden_act
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Tapas
+class TapasLMPredictionHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.transform = TapasPredictionHeadTransform(config)
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, hidden_states):
+ hidden_states = self.transform(hidden_states)
+ hidden_states = self.decoder(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas
+class TapasOnlyMLMHead(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.predictions = TapasLMPredictionHead(config)
+
+ def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
+ prediction_scores = self.predictions(sequence_output)
+ return prediction_scores
+
+
+class TapasPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TapasConfig
+ base_model_prefix = "tapas"
+ supports_gradient_checkpointing = True
+
+ # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+TAPAS_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`TapasConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TAPAS_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See
+ [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `({0}, 7)`, *optional*):
+ Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
+ class for more info.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. If
+ `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
+ used. Selected in the range `[0, config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+            - 1 indicates the head is **not masked**,
+            - 0 indicates the head is **masked**.
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.",
+ TAPAS_START_DOCSTRING,
+)
+class TapasModel(TapasPreTrainedModel):
+ """
+ This class is a small change compared to [`BertModel`], taking into account the additional token type ids.
+
+ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
+ cross-attention is added between the self-attention layers, following the architecture described in [Attention is
+ all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
+ Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.
+
+ """
+
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = TapasEmbeddings(config)
+ self.encoder = TapasEncoder(config)
+
+ self.pooler = TapasPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasModel
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
+ >>> model = TapasModel.from_pretrained("google/tapas-base")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
+
+ >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(
+ (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device
+ )
+
+ # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
+ # ourselves in which case we just need to make it broadcastable to all heads.
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+        # If a 2D or 3D attention mask is provided for the cross-attention
+        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
+ if self.config.is_decoder and encoder_hidden_states is not None:
+ encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
+ encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
+ if encoder_attention_mask is None:
+ encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
+ encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
+ else:
+ encoder_extended_attention_mask = None
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
+ )
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_extended_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING)
+class TapasForMaskedLM(TapasPreTrainedModel):
+ _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
+ config_class = TapasConfig
+ base_model_prefix = "tapas"
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.tapas = TapasModel(config, add_pooling_layer=False)
+ self.cls = TapasOnlyMLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.cls.predictions.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.cls.predictions.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple, MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasForMaskedLM
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
+ >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+
+ >>> inputs = tokenizer(
+ ... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt"
+ ... )
+ >>> labels = tokenizer(
+ ... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt"
+ ... )["input_ids"]
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.tapas(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.cls(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss() # -100 index = padding token
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables
+ (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for
+ SQA, WTQ or WikiSQL-supervised tasks.
+ """,
+ TAPAS_START_DOCSTRING,
+)
+class TapasForQuestionAnswering(TapasPreTrainedModel):
+ def __init__(self, config: TapasConfig):
+ super().__init__(config)
+
+ # base model
+ self.tapas = TapasModel(config)
+
+ # dropout (only used when training)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ # cell selection heads
+ if config.init_cell_selection_weights_to_zero:
+ # init_cell_selection_weights_to_zero: Whether the initial weights should be
+ # set to 0. This ensures that all tokens have the same prior probability.
+ self.output_weights = nn.Parameter(torch.zeros(config.hidden_size))
+ self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size))
+ else:
+ self.output_weights = nn.Parameter(torch.empty(config.hidden_size))
+ nn.init.normal_(
+ self.output_weights, std=config.initializer_range
+ ) # here, a truncated normal is used in the original implementation
+ self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size))
+ nn.init.normal_(
+ self.column_output_weights, std=config.initializer_range
+ ) # here, a truncated normal is used in the original implementation
+ self.output_bias = nn.Parameter(torch.zeros([]))
+ self.column_output_bias = nn.Parameter(torch.zeros([]))
+
+ # aggregation head
+ if config.num_aggregation_labels > 0:
+ self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ table_mask: Optional[torch.LongTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ aggregation_labels: Optional[torch.LongTensor] = None,
+ float_answer: Optional[torch.FloatTensor] = None,
+ numeric_values: Optional[torch.FloatTensor] = None,
+ numeric_values_scale: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TableQuestionAnsweringOutput]:
+ r"""
+ table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
+ Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and
+ padding are 0.
+ labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*):
+ Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the
+ answer appearing in the table. Can be obtained using [`AutoTokenizer`].
+
+ - 1 for tokens that are **part of the answer**,
+ - 0 for tokens that are **not part of the answer**.
+
+ aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*):
+ Aggregation function index for every example in the batch for computing the aggregation loss. Indices
+ should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for
+ aggregation (WikiSQL-supervised).
+ float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*):
+ Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only
+ required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.
+ numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
+ Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using
+ [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the
+ regression loss.
+ numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*):
+ Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case
+ of weak supervision for aggregation (WTQ) to calculate the regression loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasForQuestionAnswering
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
+
+ >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+ >>> logits_aggregation = outputs.logits_aggregation
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.tapas(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ pooled_output = outputs[1]
+
+ sequence_output = self.dropout(sequence_output)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ # Construct indices for the table.
+ if token_type_ids is None:
+ token_type_ids = torch.zeros(
+ (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device
+ )
+
+ token_types = [
+ "segment_ids",
+ "column_ids",
+ "row_ids",
+ "prev_labels",
+ "column_ranks",
+ "inv_column_ranks",
+ "numeric_relations",
+ ]
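+        # token_type_ids carries one channel per entry above, in this order; row and column ids are
+        # read out by index so every token can be mapped back to its table cell.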
+
+ row_ids = token_type_ids[:, :, token_types.index("row_ids")]
+ column_ids = token_type_ids[:, :, token_types.index("column_ids")]
+
+ row_index = IndexMap(
+ indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)),
+ num_segments=self.config.max_num_rows,
+ batch_dims=1,
+ )
+ col_index = IndexMap(
+ indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)),
+ num_segments=self.config.max_num_columns,
+ batch_dims=1,
+ )
+ cell_index = ProductIndexMap(row_index, col_index)
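+        # cell_index assigns every token a single segment id per (row, column) pair, which is later
+        # used to pool token-level quantities (masks, logits) into per-cell values.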
+
+ # Masks.
+ input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1]
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ # Table cells only, without question tokens and table headers.
+ if table_mask is None:
+ table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids))
+ # torch.FloatTensor[batch_size, seq_length]
+ input_mask_float = attention_mask.float().to(device)
+ table_mask_float = table_mask.float().to(device)
+ # Mask for cells that exist in the table (i.e. that are not padding).
+ cell_mask, _ = reduce_mean(input_mask_float, cell_index)
+
+ # Compute logits per token. These are used to select individual cells.
+ logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias)
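+        # One scalar selection score per token; config.temperature controls how peaked these logits
+        # are before they parameterize the Bernoulli cell-selection distribution further below.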
+
+ # Compute logits per column. These are used to select a column.
+ column_logits = None
+ if self.config.select_one_column:
+ column_logits = compute_column_logits(
+ sequence_output,
+ self.column_output_weights,
+ self.column_output_bias,
+ cell_index,
+ cell_mask,
+ self.config.allow_empty_column_selection,
+ )
+
+ # Aggregation logits
+ logits_aggregation = None
+ if self.config.num_aggregation_labels > 0:
+ logits_aggregation = self.aggregation_classifier(pooled_output)
+
+ # Total loss calculation
+ total_loss = 0.0
+ calculate_loss = False
+ if labels is not None:
+ calculate_loss = True
+ is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision
+
+ # Semi-supervised cell selection in case of no aggregation:
+ # If the answer (the denotation) appears directly in the table we might
+ # select the answer without applying any aggregation function. There are
+ # some ambiguous cases, see utils._calculate_aggregate_mask for more info.
+ # `aggregate_mask` is 1 for examples where we chose to aggregate and 0
+ # for examples where we chose to select the answer directly.
+ # `labels` encodes the positions of the answer appearing in the table.
+ if is_supervised:
+ aggregate_mask = None
+ else:
+ if float_answer is not None:
+ assert (
+ labels.shape[0] == float_answer.shape[0]
+ ), "Make sure the answers are a FloatTensor of shape (batch_size,)"
+ # [batch_size]
+ aggregate_mask = _calculate_aggregate_mask(
+ float_answer,
+ pooled_output,
+ self.config.cell_selection_preference,
+ labels,
+ self.aggregation_classifier,
+ )
+ else:
+ raise ValueError("You have to specify float answers in order to calculate the aggregate mask")
+
+ # Cell selection log-likelihood
+ if self.config.average_logits_per_cell:
+ logits_per_cell, _ = reduce_mean(logits, cell_index)
+ logits = gather(logits_per_cell, cell_index)
+ dist_per_token = torch.distributions.Bernoulli(logits=logits)
+
+ # Compute cell selection loss per example.
+ selection_loss_per_example = None
+ if not self.config.select_one_column:
+ weight = torch.where(
+ labels == 0,
+ torch.ones_like(labels, dtype=torch.float32),
+ self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32),
+ )
+ selection_loss_per_token = -dist_per_token.log_prob(labels) * weight
+ selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / (
+ torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION
+ )
+ else:
+ selection_loss_per_example, logits = _single_column_cell_selection_loss(
+ logits, column_logits, labels, cell_index, col_index, cell_mask
+ )
+ dist_per_token = torch.distributions.Bernoulli(logits=logits)
+
+ # Supervised cell selection
+ if self.config.disable_per_token_loss:
+ pass
+ elif is_supervised:
+ total_loss += torch.mean(selection_loss_per_example)
+ else:
+ # For the weakly supervised case, only assign the cell selection loss to examples where the
+ # answer is selected directly (aggregate_mask == 0)
+ total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask))
+
+ # Semi-supervised regression loss and supervised loss for aggregations
+ if self.config.num_aggregation_labels > 0:
+ if is_supervised:
+ # Note that `aggregate_mask` is None if the setting is supervised.
+ if aggregation_labels is not None:
+ assert (
+ labels.shape[0] == aggregation_labels.shape[0]
+ ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)"
+ per_example_additional_loss = _calculate_aggregation_loss(
+ logits_aggregation,
+ aggregate_mask,
+ aggregation_labels,
+ self.config.use_answer_as_supervision,
+ self.config.num_aggregation_labels,
+ self.config.aggregation_loss_weight,
+ )
+ else:
+ raise ValueError(
+ "You have to specify aggregation labels in order to calculate the aggregation loss"
+ )
+ else:
+ # Set aggregation labels to zeros
+ aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device)
+ per_example_additional_loss = _calculate_aggregation_loss(
+ logits_aggregation,
+ aggregate_mask,
+ aggregation_labels,
+ self.config.use_answer_as_supervision,
+ self.config.num_aggregation_labels,
+ self.config.aggregation_loss_weight,
+ )
+
+ if self.config.use_answer_as_supervision:
+ if numeric_values is not None and numeric_values_scale is not None:
+ assert numeric_values.shape == numeric_values_scale.shape
+ # Add regression loss for numeric answers which require aggregation.
+ answer_loss, large_answer_loss_mask = _calculate_regression_loss(
+ float_answer,
+ aggregate_mask,
+ dist_per_token,
+ numeric_values,
+ numeric_values_scale,
+ table_mask_float,
+ logits_aggregation,
+ self.config,
+ )
+ per_example_additional_loss += answer_loss
+ # Zero loss for examples with answer_loss > cutoff.
+ per_example_additional_loss *= large_answer_loss_mask
+ else:
+ raise ValueError(
+ "You have to specify numeric values and numeric values scale in order to calculate the"
+ " regression loss"
+ )
+
+ total_loss += torch.mean(per_example_additional_loss)
+
+ else:
+ # if no label ids are provided, set them to zeros in order to properly compute logits
+ labels = torch.zeros_like(logits)
+ _, logits = _single_column_cell_selection_loss(
+ logits, column_logits, labels, cell_index, col_index, cell_mask
+ )
+ if not return_dict:
+ output = (logits, logits_aggregation) + outputs[2:]
+ return ((total_loss,) + output) if calculate_loss else output
+
+ return TableQuestionAnsweringOutput(
+ loss=total_loss if calculate_loss else None,
+ logits=logits,
+ logits_aggregation=logits_aggregation,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table
+ entailment tasks, such as TabFact (Chen et al., 2020).
+ """,
+ TAPAS_START_DOCSTRING,
+)
+class TapasForSequenceClassification(TapasPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.tapas = TapasModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1`, a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). Note: this is called
+ "classification_class_index" in the original implementation.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasForSequenceClassification
+ >>> import torch
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact")
+ >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = [
+ ... "There is only one actor who is 45 years old",
+ ... "There are 3 actors which played in more than 60 movies",
+ ... ]
+
+ >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt")
+ >>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.tapas(
+ input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+""" TAPAS utilities."""
+
+
+class AverageApproximationFunction(str, enum.Enum):
+ RATIO = "ratio"
+ FIRST_ORDER = "first_order"
+ SECOND_ORDER = "second_order"
+
+
+# Beginning of everything related to segmented tensors
+
+
+class IndexMap(object):
+ """Index grouping entries within a tensor."""
+
+ def __init__(self, indices, num_segments, batch_dims=0):
+ """
+ Creates an index
+
+ Args:
+ indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer):
+ Tensor containing the indices.
+ num_segments (`torch.LongTensor`):
+ Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same
+ number of segments (although many segments can be empty).
+ batch_dims (`int`, *optional*, defaults to 0):
+ The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as
+ batch dimensions. Segments in different batch elements are always distinct even if they have the same
+ index.
+ """
+ self.indices = torch.as_tensor(indices)
+ self.num_segments = torch.as_tensor(num_segments, device=indices.device)
+ self.batch_dims = batch_dims
+
+ def batch_shape(self):
+ return self.indices.size()[: self.batch_dims] # returns a torch.Size object
+
+
+class ProductIndexMap(IndexMap):
+ """The product of two indices."""
+
+ def __init__(self, outer_index, inner_index):
+ """
+ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
+ intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
+ and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
+ combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has *num_segments* equal to
+ *outer_index.num_segments* * *inner_index.num_segments*
+
+ Args:
+ outer_index (`IndexMap`):
+ IndexMap.
+ inner_index (`IndexMap`):
+ IndexMap, must have the same shape as *outer_index*.
+ """
+ if outer_index.batch_dims != inner_index.batch_dims:
+ raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.")
+
+ super().__init__(
+ indices=(inner_index.indices + outer_index.indices * inner_index.num_segments),
+ num_segments=inner_index.num_segments * outer_index.num_segments,
+ batch_dims=inner_index.batch_dims,
+ )
+ self.outer_index = outer_index
+ self.inner_index = inner_index
+
+ def project_outer(self, index):
+ """Projects an index with the same index set onto the outer components."""
+ indices = torch.div(index.indices, self.inner_index.num_segments, rounding_mode="floor").type(torch.long)
+ return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims)
+
+ def project_inner(self, index):
+ """Projects an index with the same index set onto the inner components."""
+ return IndexMap(
+ indices=torch.fmod(index.indices, self.inner_index.num_segments)
+ .type(torch.float)
+ .floor()
+ .type(torch.long),
+ num_segments=self.inner_index.num_segments,
+ batch_dims=index.batch_dims,
+ )
+
+
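+# Illustrative sketch, not part of the original TAPAS code: a toy example of how `IndexMap` and
+# `ProductIndexMap` combine row and column ids into per-cell segment ids. The helper name and the
+# toy shapes below are assumptions made for the example only.
+def _example_product_index_map():
+ # 1 batch element, 4 tokens: rows [1, 1, 2, 2] and columns [1, 2, 1, 2]
+ row_index = IndexMap(torch.tensor([[1, 1, 2, 2]]), num_segments=3, batch_dims=1)
+ col_index = IndexMap(torch.tensor([[1, 2, 1, 2]]), num_segments=3, batch_dims=1)
+ cell_index = ProductIndexMap(row_index, col_index)
+ # every (row, column) pair gets its own segment id: col + row * inner num_segments
+ print(cell_index.indices)  # tensor([[4, 5, 7, 8]])
+ # projecting back recovers the row ids and the column ids
+ print(cell_index.project_outer(cell_index).indices)  # tensor([[1, 1, 2, 2]])
+ print(cell_index.project_inner(cell_index).indices)  # tensor([[1, 2, 1, 2]])
+
+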
+def gather(values, index, name="segmented_gather"):
+ """
+ Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up
+ a value for that index in *values*. Two elements from the same segment always get assigned the same value.
+
+ Args:
+ values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)):
+ Tensor with segment values.
+ index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)):
+ IndexMap.
+ name (`str`, *optional*, defaults to 'segmented_gather'):
+ Name for the operation. Currently not used
+
+ Returns:
+ `torch.Tensor`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values.
+ """
+ indices = index.indices
+ # first, check whether the indices of the index represent scalar values (i.e. not vectorized)
+ if len(values.shape[index.batch_dims :]) < 2:
+ return torch.gather(
+ values,
+ index.batch_dims,
+ indices.view(
+ values.size()[0], -1
+ ), # torch.gather expects index to have the same number of dimensions as values
+ ).view(indices.size())
+ else:
+ # this means we have a vectorized version
+ # we have to adjust the index
+ indices = indices.unsqueeze(-1).expand(values.shape)
+ return torch.gather(values, index.batch_dims, indices)
+
+
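+# Illustrative sketch, not part of the original code: `gather` broadcasts one value per segment back
+# to every token position that belongs to that segment. The helper name and toy values below are
+# assumptions made for the example only.
+def _example_gather():
+ # one batch element, 3 segments, one value per segment
+ values = torch.tensor([[10.0, 20.0, 30.0]])
+ index = IndexMap(torch.tensor([[0, 0, 2, 1]]), num_segments=3, batch_dims=1)
+ print(gather(values, index))  # tensor([[10., 10., 30., 20.]])
+
+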
+def flatten(index, name="segmented_flatten"):
+ """
+ Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation
+ relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by
+ *num_segments* * (k - 1). The resulting index map has *num_segments* multiplied by the number of elements in the
+ batch.
+
+ Args:
+ index (`IndexMap`):
+ IndexMap to flatten.
+ name (`str`, *optional*, defaults to 'segmented_flatten'):
+ Name for the operation. Currently not used
+
+ Returns:
+ (`IndexMap`): The flattened IndexMap.
+ """
+ # first, get batch_size as scalar tensor
+ batch_size = torch.prod(torch.tensor(list(index.batch_shape())))
+ # next, create offset as 1-D tensor of length batch_size,
+ # and multiply element-wise by num segments (to offset different elements in the batch) e.g. if batch size is 2: [0, 64]
+ offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments
+ offset = offset.view(index.batch_shape())
+ for _ in range(index.batch_dims, len(index.indices.size())): # typically range(1,2)
+ offset = offset.unsqueeze(-1)
+
+ indices = offset + index.indices
+ return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0)
+
+
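+# Illustrative sketch, not part of the original code: `flatten` shifts the segment ids of the k-th
+# batch element by k * num_segments so that segments from different batch elements never collide.
+# The helper name and toy values are assumptions made for the example only.
+def _example_flatten():
+ index = IndexMap(torch.tensor([[0, 0, 1], [0, 1, 1]]), num_segments=2, batch_dims=1)
+ flat = flatten(index)
+ # batch element 0 keeps ids {0, 1}, batch element 1 is shifted to {2, 3}
+ print(flat.indices)  # tensor([0, 0, 1, 2, 3, 3])
+ print(flat.num_segments)  # tensor(4) -> 2 segments * batch size 2
+
+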
+def range_index_map(batch_shape, num_segments, name="range_index_map"):
+ """
+ Constructs an index map equal to range(num_segments).
+
+ Args:
+ batch_shape (`torch.Size`):
+ Batch shape
+ num_segments (`int`):
+ Number of segments
+ name (`str`, *optional*, defaults to 'range_index_map'):
+ Name for the operation. Currently not used
+
+ Returns:
+ (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
+ """
+ batch_shape = torch.as_tensor(
+ batch_shape, dtype=torch.long
+ ) # create a rank 1 tensor vector containing batch_shape (e.g. [2])
+ assert len(batch_shape.size()) == 1
+ num_segments = torch.as_tensor(num_segments) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64)
+ assert len(num_segments.size()) == 0
+
+ indices = torch.arange(
+ start=0, end=num_segments, device=num_segments.device
+ ) # create a rank 1 vector with num_segments elements
+ new_tensor = torch.cat(
+ [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)],
+ dim=0,
+ )
+ # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension)
+ new_shape = [int(x) for x in new_tensor.tolist()]
+ indices = indices.view(new_shape)
+
+ multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0)
+ indices = indices.repeat(multiples.tolist())
+ # equivalent (in NumPy):
+ # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist()))
+
+ return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0])
+
+
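+# Illustrative sketch, not part of the original code: `range_index_map` builds an index whose segment
+# ids are simply 0 .. num_segments - 1, tiled over the batch. The helper name and toy shape are
+# assumptions made for the example only.
+def _example_range_index_map():
+ out_index = range_index_map(torch.Size([2]), num_segments=3)
+ print(out_index.indices)  # tensor([[0, 1, 2], [0, 1, 2]])
+ print(out_index.batch_dims)  # 1
+
+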
+def _segment_reduce(values, index, segment_reduce_fn, name):
+ """
+ Applies a segment reduction segment-wise.
+
+ Args:
+ values (`torch.Tensor`):
+ Tensor with segment values.
+ index (`IndexMap`):
+ IndexMap.
+ segment_reduce_fn (`str`):
+ Name for the reduce operation. One of "sum", "mean", "max" or "min".
+ name (`str`):
+ Name for the operation. Currently not used
+
+ Returns:
+ output_values (`torch.Tensor`): Tensor containing the values reduced per segment. output_index
+ (`IndexMap`): IndexMap over the reduced segments, as produced by *range_index_map*.
+ """
+ # Flatten the batch dimensions, as segments ops (scatter) do not support batching.
+ # However if `values` has extra dimensions to the right keep them
+ # unflattened. Segmented ops support vector-valued operations.
+ flat_index = flatten(index)
+ vector_shape = values.size()[len(index.indices.size()) :] # torch.Size object
+ flattened_shape = torch.cat(
+ [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0
+ )
+ # changed "view" by "reshape" in the following line
+ flat_values = values.reshape(flattened_shape.tolist())
+
+ out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device)
+ segment_means = out.scatter_reduce(
+ dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False
+ )
+
+ # Unflatten the values.
+ new_shape = torch.cat(
+ [
+ torch.as_tensor(index.batch_shape(), dtype=torch.long),
+ torch.as_tensor([index.num_segments], dtype=torch.long),
+ torch.as_tensor(vector_shape, dtype=torch.long),
+ ],
+ dim=0,
+ )
+
+ output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype)
+ output_index = range_index_map(index.batch_shape(), index.num_segments)
+ return output_values, output_index
+
+
+def reduce_sum(values, index, name="segmented_reduce_sum"):
+ """
+ Sums a tensor over its segments.
+
+ Outputs 0 for empty segments.
+
+ This operations computes the sum over segments, with support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of
+ vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
+ Tensor containing the values of which the sum must be taken segment-wise.
+ index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
+ Index defining the segments.
+ name (`str`, *optional*, defaults to 'segmented_reduce_sum'):
+ Name for the operation. Currently not used
+
+ Returns:
+ output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
+ output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, "sum", name)
+
+
+def reduce_mean(values, index, name="segmented_reduce_mean"):
+ """
+ Averages a tensor over its segments.
+
+ Outputs 0 for empty segments.
+
+ This operations computes the mean over segments, with support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of
+ vectors rather than scalars.
+
+ Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
+ Tensor containing the values of which the mean must be taken segment-wise.
+ index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
+ Index defining the segments.
+ name (`str`, *optional*, defaults to 'segmented_reduce_mean'):
+ Name for the operation. Currently not used
+
+ Returns:
+ output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
+ output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, "mean", name)
+
+
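+# Illustrative sketch, not part of the original code: averaging token values per segment with
+# `reduce_mean`. The output has one entry per segment and empty segments come out as 0. The helper
+# name and toy values are assumptions made for the example only.
+def _example_reduce_mean():
+ values = torch.tensor([[1.0, 3.0, 5.0, 7.0]])
+ index = IndexMap(torch.tensor([[0, 0, 1, 1]]), num_segments=3, batch_dims=1)
+ means, _ = reduce_mean(values, index)
+ print(means)  # tensor([[2., 6., 0.]]) -> segment 2 is empty, hence 0
+
+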
+def reduce_max(values, index, name="segmented_reduce_max"):
+ """
+ Computes the maximum over segments.
+
+ This operation computes the maximum over segments, with support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise
+ maximum of vectors rather than scalars.
+
+ Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
+ Tensor containing the values of which the max must be taken segment-wise.
+ index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
+ Index defining the segments.
+ name (`str`, *optional*, defaults to 'segmented_reduce_max'):
+ Name for the operation. Currently not used
+
+ Returns:
+ output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
+ output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, "amax", name)
+
+
+def reduce_min(values, index, name="segmented_reduce_min"):
+ """
+ Computes the minimum over segments.
+
+ This operations computes the minimum over segments, with support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise
+ minimum of vectors rather than scalars.
+
+ Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]):
+ Tensor containing the values of which the min must be taken segment-wise.
+ index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].):
+ Index defining the segments.
+ name (`str`, *optional*, defaults to 'segmented_reduce_min'):
+ Name for the operation. Currently not used
+
+ Returns:
+ output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the
+ output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, "amin", name)
+
+
+# End of everything related to segmented tensors
+
+
+def compute_column_logits(
+ sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection
+):
+ """
+ Computes the column logits.
+
+ Args:
+ sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
+ column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`):
+ Weights of the linear layer for column selection.
+ column_output_bias (`torch.FloatTensor` of shape `()`):
+ Bias of the linear layer for column selection.
+ cell_index (`ProductIndexMap`):
+ Index that groups tokens into cells.
+ cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
+ Mask for cells that exist in the table (i.e. that are not padding).
+ allow_empty_column_selection (`bool`):
+ Whether to allow the model to select no column at all.
+
+ Returns:
+ column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`): Tensor containing the column logits
+ for every example in the batch.
+ """
+
+ # First, compute the token logits (batch_size, seq_len) - without temperature
+ token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias
+
+ # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows)
+ cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)
+
+ # Finally, average the logits per column (batch_size, max_num_cols)
+ column_index = cell_index.project_inner(cell_logits_index)
+ column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)
+
+ cell_count, _ = reduce_sum(cell_mask, column_index)
+ column_logits /= cell_count + EPSILON_ZERO_DIVISION
+
+ # Mask columns that do not appear in the example.
+ is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0))
+ column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
+ is_padding, dtype=torch.float32, device=is_padding.device
+ )
+
+ if not allow_empty_column_selection:
+ column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor(
+ torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device
+ )
+
+ return column_logits
+
+
+def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):
+ """
+ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The
+ model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside
+ the selected column are never selected.
+
+ Args:
+ token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Tensor containing the logits per token.
+ column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`):
+ Tensor containing the logits per column.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Labels per token.
+ cell_index (`ProductIndexMap`):
+ Index that groups tokens into cells.
+ col_index (`IndexMap`):
+ Index that groups tokens into columns.
+ cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
+ Mask for cells that exist in the table (i.e. that are not padding).
+
+ Returns:
+ selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example.
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to
+ select cells in a single column. Logits outside of the most likely column according to *column_logits* will be
+ set to a very low value (such that the probabilities are 0).
+ """
+ # Part 1: column loss
+
+ # First find the column we should select. We use the column with the maximum number of selected cells.
+ labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index)
+ # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example
+ column_label = torch.argmax(labels_per_column, dim=-1) # shape (batch_size,)
+ # Check if there are no selected cells in the column. In that case the model
+ # should predict the special column id 0, which means "select nothing".
+ no_cell_selected = torch.eq(
+ torch.max(labels_per_column, dim=-1)[0], 0
+ ) # no_cell_selected is of shape (batch_size,) and equals True
+ # if an example of the batch has no cells selected (i.e. if there are no labels set to 1 for that example)
+ column_label = torch.where(
+ no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label
+ )
+
+ column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols)
+ column_loss_per_example = -column_dist.log_prob(column_label)
+
+ # Part 2: cell loss
+
+ # Reduce the labels and logits to per-cell from per-token.
+ # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32)
+ logits_per_cell, _ = reduce_mean(token_logits, cell_index)
+ # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0)
+ labels_per_cell, labels_index = reduce_max(
+ torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index
+ )
+
+ # Mask for the selected column.
+ # column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs
+ column_id_for_cells = cell_index.project_inner(labels_index).indices
+ # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected
+ column_mask = torch.as_tensor(
+ torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)),
+ dtype=torch.float32,
+ device=cell_mask.device,
+ )
+
+ # Compute the log-likelihood for cells, but only for the selected column.
+ cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell) # shape (batch_size, 64*32)
+ cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32)) # shape(batch_size, 64*32)
+
+ cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1)
+
+ # We need to normalize the loss by the number of cells in the column.
+ cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION
+
+ selection_loss_per_example = column_loss_per_example
+ selection_loss_per_example += torch.where(
+ no_cell_selected.view(selection_loss_per_example.size()),
+ torch.zeros_like(selection_loss_per_example),
+ cell_loss,
+ )
+
+ # Set the probs outside the selected column (selected by the *model*)
+ # to 0. This ensures backwards compatibility with models that select
+ # cells from multiple columns.
+ selected_column_id = torch.as_tensor(
+ torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device
+ ) # shape (batch_size,)
+
+ # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model
+ selected_column_mask = torch.as_tensor(
+ torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)),
+ dtype=torch.float32,
+ device=selected_column_id.device,
+ )
+
+ # Never select cells with the special column id 0.
+ selected_column_mask = torch.where(
+ torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()),
+ torch.zeros_like(selected_column_mask),
+ selected_column_mask,
+ )
+ new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)
+ logits = gather(new_logits_per_cell, cell_index)
+
+ return selection_loss_per_example, logits
+
+
+def compute_token_logits(sequence_output, temperature, output_weights, output_bias):
+ """
+ Computes logits per token
+
+ Args:
+ sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model.
+ temperature (`float`):
+ Temperature for the Bernoulli distribution.
+ output_weights (`torch.FloatTensor` of shape `(hidden_size,)`):
+ Weights of the linear layer for cell selection.
+ output_bias (`torch.FloatTensor` of shape `()`):
+ Bias of the linear layer for cell selection
+
+ Returns:
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token.
+ """
+ logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature
+
+ return logits
+
+
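+# Illustrative sketch, not part of the original code: the per-token logit is a dot product of each
+# hidden state with a weight vector plus a bias, divided by the temperature. The helper name and toy
+# shapes are assumptions made for the example only.
+def _example_compute_token_logits():
+ hidden = torch.randn(2, 5, 8)  # (batch_size, seq_length, hidden_size)
+ weights = torch.randn(8)
+ bias = torch.tensor(0.0)
+ logits = compute_token_logits(hidden, temperature=1.0, output_weights=weights, output_bias=bias)
+ print(logits.shape)  # torch.Size([2, 5])
+
+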
+def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier):
+ """
+ Finds examples where the model should select cells with no aggregation.
+
+ Returns a mask that determines for which examples should the model select answers directly from the table, without
+ any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only
+ apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation
+ function. The ambiguous case is when the answer is a number that also appears in the table. In this case we use
+ the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The
+ threshold for this is the hyperparameter *cell_selection_preference*.
+
+ Args:
+ answer (`torch.FloatTensor` of shape `(batch_size, )`):
+ Answer for every example in the batch. Nan if there is no scalar answer.
+ pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ Output of the pooler (BertPooler) on top of the encoder layer.
+ cell_selection_preference (`float`):
+ Preference for cell selection in ambiguous cases.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Labels per token.
+ aggregation_classifier (`torch.nn.Linear`):
+ Aggregation head.
+
+ Returns:
+ aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use
+ aggregation functions.
+ """
+ # torch.FloatTensor(batch_size,)
+ aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device)
+ logits_aggregation = aggregation_classifier(pooled_output)
+ dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
+ # Index 0 corresponds to "no aggregation".
+ aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
+
+ # Cell selection examples according to current model.
+ is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference
+
+ # Examples with non-empty cell selection supervision.
+ is_cell_supervision_available = torch.sum(labels, dim=1) > 0
+
+ # torch.where is not equivalent to tf.where (in tensorflow 1)
+ # hence the added .view on the condition to match the shape of the first tensor
+ aggregate_mask = torch.where(
+ torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()),
+ torch.zeros_like(aggregate_mask_init, dtype=torch.float32),
+ aggregate_mask_init,
+ )
+
+ aggregate_mask = aggregate_mask.detach()
+
+ return aggregate_mask
+
+
+def _calculate_aggregation_loss_known(
+ logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
+):
+ """
+ Calculates aggregation loss when its type is known during training.
+
+ In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation"
+ should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting
+ where the aggregation type is always known, standard cross entropy loss is accumulated for all examples.
+
+ Args:
+ logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
+ A mask set to 1 for examples that should use aggregation functions.
+ aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
+ Aggregation function id for every example in the batch.
+ use_answer_as_supervision (`bool`, *optional*):
+ Whether to use the answer as the only supervision for aggregation examples.
+ num_aggregation_labels (`int`, *optional*, defaults to 0):
+ The number of aggregation operators to predict.
+
+ Returns:
+ aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is known
+ during training) per example.
+ """
+ if use_answer_as_supervision:
+ # Prepare "no aggregation" targets for cell selection examples.
+ target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long)
+ else:
+ # Use aggregation supervision as the target.
+ target_aggregation = aggregation_labels
+
+ one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32)
+ log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1)
+
+ # torch.FloatTensor[batch_size]
+ per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1)
+ if use_answer_as_supervision:
+ # Accumulate loss only for examples requiring cell selection
+ # (no aggregation).
+ return per_example_aggregation_intermediate * (1 - aggregate_mask)
+ else:
+ return per_example_aggregation_intermediate
+
+
+def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
+ """
+ Calculates aggregation loss in the case of answer supervision.
+
+ Args:
+ logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
+ A mask set to 1 for examples that should use aggregation functions
+
+ Returns:
+ aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in case of answer
+ supervision) per example.
+ """
+ dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
+ # Index 0 corresponds to "no aggregation".
+ aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
+ # Predict some aggregation in case of an answer that needs aggregation.
+ # This increases the probability of all aggregation functions, in a way
+ # similar to MML, but without considering whether the function gives the
+ # correct answer.
+ return -torch.log(aggregation_ops_total_mass) * aggregate_mask
+
+
+def _calculate_aggregation_loss(
+ logits_aggregation,
+ aggregate_mask,
+ aggregation_labels,
+ use_answer_as_supervision,
+ num_aggregation_labels,
+ aggregation_loss_weight,
+):
+ """
+ Calculates the aggregation loss per example.
+
+ Args:
+ logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`):
+ A mask set to 1 for examples that should use aggregation functions.
+ aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`):
+ Aggregation function id for every example in the batch.
+ use_answer_as_supervision (`bool`, *optional*):
+ Whether to use the answer as the only supervision for aggregation examples.
+ num_aggregation_labels (`int`, *optional*, defaults to 0):
+ The number of aggregation operators to predict.
+ aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
+ Importance weight for the aggregation loss.
+
+ Returns:
+ aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example.
+ """
+ per_example_aggregation_loss = _calculate_aggregation_loss_known(
+ logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
+ )
+
+ if use_answer_as_supervision:
+ # Add aggregation loss for numeric answers that need aggregation.
+ per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)
+ return aggregation_loss_weight * per_example_aggregation_loss
+
+
+def _calculate_expected_result(
+ dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
+):
+ """
+ Calculates the expected result given cell and aggregation probabilities.
+
+ Args:
+ dist_per_cell (`torch.distributions.Bernoulli`):
+ Cell selection distribution for each cell.
+ numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
+ Numeric values of every token. Nan for tokens which are not numeric values.
+ numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
+ Scale of the numeric values of every token.
+ input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
+ Mask for the table, without question tokens and table headers.
+ logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ config ([`TapasConfig`]):
+ Model configuration class with all the hyperparameters of the model
+
+ Returns:
+ expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example.
+ """
+ if config.use_gumbel_for_cells:
+ gumbel_dist = torch.distributions.RelaxedBernoulli(
+ # The token logits were already divided by the temperature and used for
+ # computing cell selection errors, so we need to multiply them again here
+ temperature=config.temperature,
+ logits=dist_per_cell.logits * config.temperature,
+ )
+ scaled_probability_per_cell = gumbel_dist.sample()
+ else:
+ scaled_probability_per_cell = dist_per_cell.probs
+
+ # [batch_size, seq_length]
+ scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
+ count_result = torch.sum(scaled_probability_per_cell, dim=1)
+ numeric_values_masked = torch.where(
+ torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values
+ ) # Mask non-numeric table values to zero.
+ sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)
+ avg_approximation = config.average_approximation_function
+ if avg_approximation == AverageApproximationFunction.RATIO:
+ average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
+ elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
+ # The sum of all probabilities except those that correspond to other cells
+ # Ex here stands for expectation: more explicitly, the expectation of the sum of N-1 Bernoulli random variables
+ # plus the constant 1, computed by adding all N expected values and subtracting the extra one. It corresponds to
+ # X_c in Appendix D of the original TAPAS paper, which approximates the average of a random set.
+ ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
+ average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1)
+ elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
+ # The sum of all probabilities except those that correspond to other cells
+ ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1
+ pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
+ var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var
+
+ multiplier = (var / torch.square(ex) + 1) / ex
+ average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1)
+ else:
+ raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}")
+
+ if config.use_gumbel_for_aggregation:
+ gumbel_dist = torch.distributions.RelaxedOneHotCategorical(
+ config.aggregation_temperature, logits=logits_aggregation[:, 1:]
+ )
+ # [batch_size, num_aggregation_labels - 1]
+ aggregation_op_only_probs = gumbel_dist.sample()
+ else:
+ # [batch_size, num_aggregation_labels - 1]
+ aggregation_op_only_probs = nn.functional.softmax(
+ logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1
+ )
+
+ all_results = torch.cat(
+ [
+ torch.unsqueeze(sum_result, dim=1),
+ torch.unsqueeze(average_result, dim=1),
+ torch.unsqueeze(count_result, dim=1),
+ ],
+ dim=1,
+ )
+
+ expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1)
+ return expected_result
+
+
+# PyTorch does not currently support Huber loss with a custom delta, so we define it ourselves
+def huber_loss(input, target, delta: float = 1.0):
+ errors = torch.abs(input - target) # shape (batch_size,)
+ return torch.where(errors < delta, 0.5 * errors**2, errors * delta - (0.5 * delta**2))
+
+
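+# Illustrative sketch, not part of the original code: the custom `huber_loss` above is quadratic for
+# |error| < delta and linear beyond it. Toy values are assumptions made for the example only.
+def _example_huber_loss():
+ pred = torch.tensor([0.0, 0.0])
+ target = torch.tensor([0.5, 3.0])
+ print(huber_loss(pred, target, delta=1.0))  # tensor([0.1250, 2.5000])
+
+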
+def _calculate_regression_loss(
+ answer,
+ aggregate_mask,
+ dist_per_cell,
+ numeric_values,
+ numeric_values_scale,
+ input_mask_float,
+ logits_aggregation,
+ config,
+):
+ """
+ Calculates the regression loss per example.
+
+ Args:
+ answer (`torch.FloatTensor` of shape `(batch_size,)`):
+ Answer for every example in the batch. Nan if there is no scalar answer.
+ aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`):
+ A mask set to 1 for examples that should use aggregation functions.
+ dist_per_cell (`torch.distributions.Bernoulli`):
+ Cell selection distribution for each cell.
+ numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
+ Numeric values of every token. Nan for tokens which are not numeric values.
+ numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
+ Scale of the numeric values of every token.
+ input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`):
+ Mask for the table, without question tokens and table headers.
+ logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ config ([`TapasConfig`]):
+ Model configuration class with all the parameters of the model
+
+ Returns:
+ per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scaled answer loss for each
+ example in the batch.
+ large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is 1 for examples whose
+ answer loss is larger than the answer_loss_cutoff.
+ """
+ # float32 (batch_size,)
+ expected_result = _calculate_expected_result(
+ dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
+ )
+
+ # float32 (batch_size,)
+ answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer)
+
+ if config.use_normalized_answer_loss:
+ normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach()
+
+ normalized_answer_masked = answer_masked / normalizer
+ normalized_expected_result = expected_result / normalizer
+ per_example_answer_loss = huber_loss(
+ normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask
+ )
+ else:
+ per_example_answer_loss = huber_loss(
+ expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta
+ )
+
+ if config.answer_loss_cutoff is None:
+ large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32)
+
+ else:
+ large_answer_loss_mask = torch.where(
+ per_example_answer_loss > config.answer_loss_cutoff,
+ torch.zeros_like(per_example_answer_loss, dtype=torch.float32),
+ torch.ones_like(per_example_answer_loss, dtype=torch.float32),
+ )
+ per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask)
+
+ return per_example_answer_loss_scaled, large_answer_loss_mask
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/modeling_tf_tapas.py b/venv/lib/python3.10/site-packages/transformers/models/tapas/modeling_tf_tapas.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b2ed5fab455a8b782f8dfe51394ffa6abf94446
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tapas/modeling_tf_tapas.py
@@ -0,0 +1,2450 @@
+# coding=utf-8
+# Copyright 2021 Google Research and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""TF 2.0 TAPAS model."""
+
+
+from __future__ import annotations
+
+import enum
+import math
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFBaseModelOutputWithPooling,
+ TFMaskedLMOutput,
+ TFSequenceClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFPreTrainedModel,
+ TFSequenceClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_tensorflow_probability_available,
+ logging,
+ replace_return_docstrings,
+ requires_backends,
+)
+from .configuration_tapas import TapasConfig
+
+
+logger = logging.get_logger(__name__)
+
+# soft dependency
+if is_tensorflow_probability_available():
+ try:
+ import tensorflow_probability as tfp
+
+ # On the first call, check whether a compatible version of TensorFlow is installed
+ # TensorFlow Probability depends on a recent stable release of TensorFlow
+ n = tfp.distributions.Normal(loc=0.0, scale=1.0)
+ except ImportError:
+ logger.error(
+ "TAPAS models are not usable since `tensorflow_probability` can't be loaded. "
+ "It seems you have `tensorflow_probability` installed with the wrong tensorflow version. "
+ "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
+ )
+
+_CONFIG_FOR_DOC = "TapasConfig"
+_CHECKPOINT_FOR_DOC = "google/tapas-base"
+
+
+from ..deprecated._archive_maps import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+EPSILON_ZERO_DIVISION = 1e-10
+CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0
+
+
+@dataclass
+class TFTableQuestionAnsweringOutput(ModelOutput):
+ """
+ Output type of [`TFTapasForQuestionAnswering`].
+
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale`) are provided):
+ Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the
+ semi-supervised regression loss and (optionally) supervised loss for aggregations.
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Prediction scores of the cell selection head, for every token.
+ logits_aggregation (`tf.Tensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`):
+ Prediction scores of the aggregation head, for every aggregation operator.
+ hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape
+ `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus
+ the initial embedding outputs.
+ attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ loss: tf.Tensor | None = None
+ logits: tf.Tensor = None
+ logits_aggregation: tf.Tensor | None = None
+ hidden_states: Tuple[tf.Tensor] | None = None
+ attentions: Tuple[tf.Tensor] | None = None
+
+
+class TFTapasEmbeddings(keras.layers.Layer):
+ """
+ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of
+ additional token type embeddings to encode tabular structure.
+ """
+
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.number_of_token_type_embeddings = len(config.type_vocab_sizes)
+ self.reset_position_index_per_cell = config.reset_position_index_per_cell
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ )
+ for i, type_vocab_size in enumerate(self.config.type_vocab_sizes):
+ with tf.name_scope(f"token_type_embeddings_{i}"):
+ setattr(
+ self,
+ f"token_type_embeddings_{i}",
+ self.add_weight(
+ name="embeddings",
+ shape=[type_vocab_size, self.hidden_size],
+ initializer=get_initializer(self.initializer_range),
+ ),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ token_type_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+ if input_ids is not None:
+ input_shape = shape_list(input_ids)
+ else:
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ seq_length = input_shape[1]
+
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape + [self.number_of_token_type_embeddings], value=0)
+
+ if position_ids is None:
+ # create absolute position embeddings
+ position_ids = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0)
+ position_ids = tf.broadcast_to(position_ids, shape=input_shape)
+ # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings
+ if self.reset_position_index_per_cell:
+ # shape (batch_size, seq_len)
+ col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1)
+ # shape (batch_size, seq_len)
+ row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1)
+ # shape (batch_size, seq_len)
+ full_index = ProductIndexMap(col_index, row_index)
+ # shape (max_rows * max_columns,). First absolute position for every cell
+ first_position_per_segment = reduce_min(position_ids, full_index)[0]
+ # shape (batch_size, seq_len). First absolute position of the cell for every token
+ first_position = gather(first_position_per_segment, full_index)
+ # shape (1, seq_len)
+ position = tf.expand_dims(tf.range(start=0, limit=seq_length), axis=0)
+ position_ids = tf.math.minimum(self.max_position_embeddings - 1, position - first_position)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ position_embeddings = tf.gather(self.position_embeddings, indices=position_ids)
+
+ final_embeddings = inputs_embeds + position_embeddings
+
+ for i in range(self.number_of_token_type_embeddings):
+ name = f"token_type_embeddings_{i}"
+ final_embeddings += tf.gather(params=getattr(self, name), indices=token_type_ids[:, :, i])
+
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention with Bert->Tapas
+class TFTapasSelfAttention(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number "
+ f"of attention heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+ self.query = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
+ )
+ self.key = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
+ )
+ self.value = keras.layers.Dense(
+ units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
+ )
+ self.dropout = keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
+
+ self.is_decoder = config.is_decoder
+ self.config = config
+
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor,
+ encoder_attention_mask: tf.Tensor,
+ past_key_value: Tuple[tf.Tensor],
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ batch_size = shape_list(hidden_states)[0]
+ mixed_query_layer = self.query(inputs=hidden_states)
+
+ # If this is instantiated as a cross-attention module, the keys
+ # and values come from an encoder; the attention mask needs to be
+ # such that the encoder's padding tokens are not attended to.
+ is_cross_attention = encoder_hidden_states is not None
+
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_layer = past_key_value[0]
+ value_layer = past_key_value[1]
+ attention_mask = encoder_attention_mask
+ elif is_cross_attention:
+ key_layer = self.transpose_for_scores(self.key(inputs=encoder_hidden_states), batch_size)
+ value_layer = self.transpose_for_scores(self.value(inputs=encoder_hidden_states), batch_size)
+ attention_mask = encoder_attention_mask
+ elif past_key_value is not None:
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
+ key_layer = tf.concat([past_key_value[0], key_layer], axis=2)
+ value_layer = tf.concat([past_key_value[1], value_layer], axis=2)
+ else:
+ key_layer = self.transpose_for_scores(self.key(inputs=hidden_states), batch_size)
+ value_layer = self.transpose_for_scores(self.value(inputs=hidden_states), batch_size)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_layer, value_layer)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
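+ # Scale the raw scores by sqrt(attention_head_size), as in standard scaled dot-product attention.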
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in the TFTapasModel call() function)
+ attention_scores = tf.add(attention_scores, attention_mask)
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(inputs=attention_probs, training=training)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attention_probs = tf.multiply(attention_probs, head_mask)
+
+ attention_output = tf.matmul(attention_probs, value_layer)
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, all_head_size)
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
+ outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
+
+ if self.is_decoder:
+ outputs = outputs + (past_key_value,)
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "query", None) is not None:
+ with tf.name_scope(self.query.name):
+ self.query.build([None, None, self.config.hidden_size])
+ if getattr(self, "key", None) is not None:
+ with tf.name_scope(self.key.name):
+ self.key.build([None, None, self.config.hidden_size])
+ if getattr(self, "value", None) is not None:
+ with tf.name_scope(self.value.name):
+ self.value.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->Tapas
+class TFTapasSelfOutput(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertAttention with Bert->Tapas
+class TFTapasAttention(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.self_attention = TFTapasSelfAttention(config, name="self")
+ self.dense_output = TFTapasSelfOutput(config, name="output")
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(
+ self,
+ input_tensor: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor,
+ encoder_attention_mask: tf.Tensor,
+ past_key_value: Tuple[tf.Tensor],
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ self_outputs = self.self_attention(
+ hidden_states=input_tensor,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = self.dense_output(
+ hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
+ )
+ # add attentions (possibly with past_key_value) if we output them
+ outputs = (attention_output,) + self_outputs[1:]
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attention", None) is not None:
+ with tf.name_scope(self.self_attention.name):
+ self.self_attention.build(None)
+ if getattr(self, "dense_output", None) is not None:
+ with tf.name_scope(self.dense_output.name):
+ self.dense_output.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->Tapas
+class TFTapasIntermediate(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->Tapas
+class TFTapasOutput(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertLayer with Bert->Tapas
+class TFTapasLayer(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFTapasAttention(config, name="attention")
+ self.is_decoder = config.is_decoder
+ self.add_cross_attention = config.add_cross_attention
+ if self.add_cross_attention:
+ if not self.is_decoder:
+ raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
+ self.crossattention = TFTapasAttention(config, name="crossattention")
+ self.intermediate = TFTapasIntermediate(config, name="intermediate")
+ self.bert_output = TFTapasOutput(config, name="output")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor | None,
+ encoder_attention_mask: tf.Tensor | None,
+ past_key_value: Tuple[tf.Tensor] | None,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ self_attention_outputs = self.attention(
+ input_tensor=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_value=self_attn_past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = self_attention_outputs[0]
+
+ # if decoder, the last output is tuple of self-attn cache
+ if self.is_decoder:
+ outputs = self_attention_outputs[1:-1]
+ present_key_value = self_attention_outputs[-1]
+ else:
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ cross_attn_present_key_value = None
+ if self.is_decoder and encoder_hidden_states is not None:
+ if not hasattr(self, "crossattention"):
+ raise ValueError(
+ f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
+ " by setting `config.add_cross_attention=True`"
+ )
+
+ # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ cross_attention_outputs = self.crossattention(
+ input_tensor=attention_output,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ attention_output = cross_attention_outputs[0]
+ outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights
+
+ # add cross-attn cache to positions 3,4 of present_key_value tuple
+ cross_attn_present_key_value = cross_attention_outputs[-1]
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ intermediate_output = self.intermediate(hidden_states=attention_output)
+ layer_output = self.bert_output(
+ hidden_states=intermediate_output, input_tensor=attention_output, training=training
+ )
+ outputs = (layer_output,) + outputs # add attentions if we output them
+
+ # if decoder, return the attn key/values as the last output
+ if self.is_decoder:
+ outputs = outputs + (present_key_value,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "bert_output", None) is not None:
+ with tf.name_scope(self.bert_output.name):
+ self.bert_output.build(None)
+ if getattr(self, "crossattention", None) is not None:
+ with tf.name_scope(self.crossattention.name):
+ self.crossattention.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertEncoder with Bert->Tapas
+class TFTapasEncoder(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.layer = [TFTapasLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ head_mask: tf.Tensor,
+ encoder_hidden_states: tf.Tensor | None,
+ encoder_attention_mask: tf.Tensor | None,
+ past_key_values: Tuple[Tuple[tf.Tensor]] | None,
+ use_cache: Optional[bool],
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]:
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+
+ next_decoder_cache = () if use_cache else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ past_key_value = past_key_values[i] if past_key_values is not None else None
+
+ layer_outputs = layer_module(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ head_mask=head_mask[i],
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[-1],)
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+ if self.config.add_cross_attention and encoder_hidden_states is not None:
+ all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v for v in [hidden_states, all_hidden_states, all_attentions, all_cross_attentions] if v is not None
+ )
+
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_decoder_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->Tapas
+class TFTapasPooler(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Tapas
+class TFTapasPredictionHeadTransform(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="dense",
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.transform_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.transform_act_fn = config.hidden_act
+
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.transform_act_fn(hidden_states)
+ hidden_states = self.LayerNorm(inputs=hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Tapas
+class TFTapasLMPredictionHead(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+
+ self.transform = TFTapasPredictionHeadTransform(config, name="transform")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.input_embeddings = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "transform", None) is not None:
+ with tf.name_scope(self.transform.name):
+ self.transform.build(None)
+
+ def get_output_embeddings(self) -> keras.layers.Layer:
+ return self.input_embeddings
+
+ def set_output_embeddings(self, value: tf.Variable):
+ self.input_embeddings.weight = value
+ self.input_embeddings.vocab_size = shape_list(value)[0]
+
+ def get_bias(self) -> Dict[str, tf.Variable]:
+ return {"bias": self.bias}
+
+ def set_bias(self, value: tf.Variable):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.transform(hidden_states=hidden_states)
+ seq_length = shape_list(hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
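+ # Project the transformed hidden states back onto the vocabulary using the (tied) input embedding matrix.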
+ hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Tapas
+class TFTapasMLMHead(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, input_embeddings: keras.layers.Layer, **kwargs):
+ super().__init__(**kwargs)
+
+ self.predictions = TFTapasLMPredictionHead(config, input_embeddings, name="predictions")
+
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
+ prediction_scores = self.predictions(hidden_states=sequence_output)
+
+ return prediction_scores
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "predictions", None) is not None:
+ with tf.name_scope(self.predictions.name):
+ self.predictions.build(None)
+
+
+@keras_serializable
+class TFTapasMainLayer(keras.layers.Layer):
+ config_class = TapasConfig
+
+ def __init__(self, config: TapasConfig, add_pooling_layer: bool = True, **kwargs):
+ requires_backends(self, "tensorflow_probability")
+ super().__init__(**kwargs)
+
+ self.config = config
+
+ self.embeddings = TFTapasEmbeddings(config, name="embeddings")
+ self.encoder = TFTapasEncoder(config, name="encoder")
+ self.pooler = TFTapasPooler(config, name="pooler") if add_pooling_layer else None
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the
+ base class PreTrainedModel for details.
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
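+ # TAPAS token type ids carry the table structure, hence the extra trailing dimension of len(type_vocab_sizes).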
+ if token_type_ids is None:
+ token_type_ids = tf.fill(dims=input_shape + [len(self.config.type_vocab_sizes)], value=0)
+
+ embedding_output = self.embeddings(
+ input_ids=input_ids,
+ position_ids=position_ids,
+ token_type_ids=token_type_ids,
+ inputs_embeds=inputs_embeds,
+ training=training,
+ )
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.config.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(hidden_states=sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+
+
+class TFTapasPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = TapasConfig
+ base_model_prefix = "tapas"
+
+ @property
+ def input_signature(self):
+ return {
+ "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
+ "attention_mask": tf.TensorSpec((None, None), tf.float32, name="attention_mask"),
+ "token_type_ids": tf.TensorSpec((None, None, 7), tf.int32, name="token_type_ids"),
+ }
+
+
+TAPAS_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Parameters:
+ config ([`TapasConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+TAPAS_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0}, 7)`, *optional*):
+ Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this
+ class for more info.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. If
+ `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be
+ used. Selected in the range `[0, config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`np.ndarray` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode; in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.",
+ TAPAS_START_DOCSTRING,
+)
+class TFTapasModel(TFTapasPreTrainedModel):
+ def __init__(self, config: TapasConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.tapas = TFTapasMainLayer(config, name="tapas")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasModel
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
+ >>> model = TapasModel.from_pretrained("google/tapas-base")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
+
+ >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf")
+ >>> outputs = model(**inputs)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ ```"""
+ outputs = self.tapas(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "tapas", None) is not None:
+ with tf.name_scope(self.tapas.name):
+ self.tapas.build(None)
+
+
+@add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING)
+class TFTapasForMaskedLM(TFTapasPreTrainedModel, TFMaskedLanguageModelingLoss):
+ def __init__(self, config: TapasConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ if config.is_decoder:
+ logger.warning(
+ "If you want to use `TFTapasForMaskedLM` make sure `config.is_decoder=False` for "
+ "bi-directional self-attention."
+ )
+
+ self.tapas = TFTapasMainLayer(config, add_pooling_layer=False, name="tapas")
+ self.lm_head = TFTapasMLMHead(config, input_embeddings=self.tapas.embeddings, name="cls")
+
+ def get_lm_head(self) -> keras.layers.Layer:
+ return self.lm_head.predictions
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFMaskedLMOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see the `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasForMaskedLM
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base")
+ >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+
+ >>> inputs = tokenizer(
+ ... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="tf"
+ ... )
+ >>> labels = tokenizer(
+ ... table=table, queries="How many movies has George Clooney played in?", return_tensors="tf"
+ ... )["input_ids"]
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> logits = outputs.logits
+ ```"""
+ outputs = self.tapas(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "tapas", None) is not None:
+ with tf.name_scope(self.tapas.name):
+ self.tapas.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+class TFTapasComputeTokenLogits(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.temperature = config.temperature
+ # cell selection heads
+ with tf.name_scope("output"):
+ self.output_weights = self.add_weight(
+ name="output_weights",
+ shape=(config.hidden_size,),
+ dtype=tf.float32,
+ trainable=True,
+ initializer=tf.zeros_initializer()
+ if config.init_cell_selection_weights_to_zero
+ else keras.initializers.TruncatedNormal(stddev=config.initializer_range),
+ )
+ self.output_bias = self.add_weight(
+ name="output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer()
+ )
+
+ def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
+ """
+ Computes logits per token
+
+ Args:
+ sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the
+ model.
+
+ Returns:
+ logits (`tf.Tensor` of shape `(batch_size, sequence_length)`): Logits per token.
+ """
+ logits = (tf.einsum("bsj,j->bs", sequence_output, self.output_weights) + self.output_bias) / self.temperature
+ return logits
+
+
+class TFTapasComputeColumnLogits(keras.layers.Layer):
+ def __init__(self, config: TapasConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ with tf.name_scope("column_output"):
+ self.column_output_weights = self.add_weight(
+ name="column_output_weights",
+ shape=[config.hidden_size],
+ dtype=tf.float32,
+ trainable=True,
+ initializer=tf.zeros_initializer()
+ if config.init_cell_selection_weights_to_zero
+ else keras.initializers.TruncatedNormal(stddev=config.initializer_range),
+ )
+ self.column_output_bias = self.add_weight(
+ name="column_output_bias", shape=(), trainable=True, initializer=tf.zeros_initializer()
+ )
+
+ def call(self, sequence_output, cell_index, cell_mask, allow_empty_column_selection) -> tf.Tensor:
+ """
+ Computes the column logits.
+
+ Args:
+ sequence_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the
+ model.
+ cell_index (`ProductIndexMap`):
+ Index that groups tokens into cells.
+ cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
+ Mask for cells that exist in the table (i.e. that are not padding).
+ allow_empty_column_selection (`bool`):
+ Whether to allow selecting no column at all.
+
+ Returns:
+ column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for
+ every example in the batch.
+ """
+
+ # First, compute the token logits (batch_size, seq_len) - without temperature
+ token_logits = tf.einsum("bsj,j->bs", sequence_output, self.column_output_weights) + self.column_output_bias
+
+ # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows)
+ cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index)
+
+ # Finally, average the logits per column (batch_size, max_num_cols)
+ column_index = cell_index.project_inner(cell_logits_index)
+ column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index)
+
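+ # Dividing the masked sum by the number of real (non-padding) cells per column turns it into a mean.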
+ cell_count, _ = reduce_sum(cell_mask, column_index)
+ column_logits /= cell_count + EPSILON_ZERO_DIVISION
+
+ # Mask columns that do not appear in the example.
+ is_padding = tf.logical_and(cell_count < 0.5, tf.not_equal(out_index.indices, 0))
+ column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(is_padding, tf.float32)
+
+ if not allow_empty_column_selection:
+ column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * tf.cast(tf.equal(out_index.indices, 0), tf.float32)
+
+ return column_logits
+
+
+@add_start_docstrings(
+ """
+ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables
+ (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for
+ SQA, WTQ or WikiSQL-supervised tasks.
+ """,
+ TAPAS_START_DOCSTRING,
+)
+class TFTapasForQuestionAnswering(TFTapasPreTrainedModel):
+ def __init__(self, config: TapasConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ # base model
+ self.tapas = TFTapasMainLayer(config, name="tapas")
+
+ # dropout
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+
+ self.compute_token_logits = TFTapasComputeTokenLogits(config, name="compute_token_logits")
+
+ self.compute_column_logits = TFTapasComputeColumnLogits(config, name="compute_column_logits")
+
+ if config.num_aggregation_labels > 0:
+ self.aggregation_classifier = keras.layers.Dense(
+ config.num_aggregation_labels,
+ kernel_initializer=get_initializer(config.initializer_range),
+ name="aggregation_classifier",
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFTableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ table_mask: np.ndarray | tf.Tensor | None = None,
+ aggregation_labels: np.ndarray | tf.Tensor | None = None,
+ float_answer: np.ndarray | tf.Tensor | None = None,
+ numeric_values: np.ndarray | tf.Tensor | None = None,
+ numeric_values_scale: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFTableQuestionAnsweringOutput, Tuple[tf.Tensor]]:
+ r"""
+ table_mask (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
+ Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and
+ padding are 0.
+ labels (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
+ Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the
+ answer appearing in the table. Can be obtained using [`AutoTokenizer`].
+
+ - 1 for tokens that are **part of the answer**,
+ - 0 for tokens that are **not part of the answer**.
+
+ aggregation_labels (`tf.Tensor` of shape `(batch_size, )`, *optional*):
+ Aggregation function index for every example in the batch for computing the aggregation loss. Indices
+ should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for
+ aggregation (WikiSQL-supervised).
+ float_answer (`tf.Tensor` of shape `(batch_size, )`, *optional*):
+ Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only
+ required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss.
+ numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
+ Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using
+ [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the
+ regression loss.
+ numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`, *optional*):
+ Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. Only required in case
+ of weak supervision for aggregation (WTQ) to calculate the regression loss.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasForQuestionAnswering
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"]
+
+ >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+ >>> logits_aggregation = outputs.logits_aggregation
+ ```"""
+
+ outputs = self.tapas(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+ pooled_output = outputs[1]
+
+ sequence_output = self.dropout(sequence_output)
+
+ if input_ids is not None:
+ input_shape = shape_list(input_ids)
+ else:
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ # Default the token type ids to zeros if they were not provided.
+ if token_type_ids is None:
+ token_type_ids = tf.fill(input_shape + [len(self.config.type_vocab_sizes)], 0)
+
+ token_types = [
+ "segment_ids",
+ "column_ids",
+ "row_ids",
+ "prev_labels",
+ "column_ranks",
+ "inv_column_ranks",
+ "numeric_relations",
+ ]
+
+ row_ids = token_type_ids[:, :, token_types.index("row_ids")]
+ column_ids = token_type_ids[:, :, token_types.index("column_ids")]
+
+ # Construct indices for the table.
+ row_index = IndexMap(
+ indices=tf.minimum(tf.cast(row_ids, tf.int32), self.config.max_num_rows - 1),
+ num_segments=self.config.max_num_rows,
+ batch_dims=1,
+ )
+ col_index = IndexMap(
+ indices=tf.minimum(tf.cast(column_ids, tf.int32), self.config.max_num_columns - 1),
+ num_segments=self.config.max_num_columns,
+ batch_dims=1,
+ )
+ cell_index = ProductIndexMap(row_index, col_index)
+
+ # Masks.
+ input_shape = shape_list(input_ids) if input_ids is not None else shape_list(inputs_embeds)[:-1]
+ if attention_mask is None:
+ attention_mask = tf.ones(input_shape)
+ # Table cells only, without question tokens and table headers.
+ if table_mask is None:
+ table_mask = tf.where(row_ids > 0, tf.ones_like(row_ids), tf.zeros_like(row_ids))
+ # [batch_size, seq_length]
+ input_mask_float = tf.cast(attention_mask, tf.float32)
+ table_mask_float = tf.cast(table_mask, tf.float32)
+
+ # Mask for cells that exist in the table (i.e. that are not padding).
+ cell_mask, _ = reduce_mean(input_mask_float, cell_index)
+
+ # Compute logits per token. These are used to select individual cells.
+ logits = self.compute_token_logits(sequence_output)
+
+ # Compute logits per column. These are used to select a column.
+ column_logits = None
+ if self.config.select_one_column:
+ column_logits = self.compute_column_logits(
+ sequence_output, cell_index, cell_mask, self.config.allow_empty_column_selection
+ )
+
+ # Aggregate logits.
+ logits_aggregation = None
+ if self.config.num_aggregation_labels > 0:
+ logits_aggregation = self.aggregation_classifier(pooled_output)
+
+ # Total loss calculation
+ total_loss = tf.zeros(shape=(1,), dtype=tf.float32)
+ calculate_loss = False
+ if labels is not None:
+ calculate_loss = True
+ is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision
+
+ # Semi-supervised cell selection in case of no aggregation:
+ # If the answer (the denotation) appears directly in the table we might
+ # select the answer without applying any aggregation function. There are
+ # some ambiguous cases, see utils._calculate_aggregate_mask for more info.
+ # `aggregate_mask` is 1 for examples where we chose to aggregate and 0
+ # for examples where we chose to select the answer directly.
+ # `labels` encodes the positions of the answer appearing in the table.
+ if is_supervised:
+ aggregate_mask = None
+ else:
+ if float_answer is not None:
+ assert (
+ shape_list(labels)[0] == shape_list(float_answer)[0]
+ ), "Make sure the answers are a FloatTensor of shape (batch_size,)"
+ # [batch_size]
+ aggregate_mask = _calculate_aggregate_mask(
+ float_answer,
+ pooled_output,
+ self.config.cell_selection_preference,
+ labels,
+ self.aggregation_classifier,
+ )
+ else:
+ aggregate_mask = None
+ raise ValueError("You have to specify float answers in order to calculate the aggregate mask")
+
+ # Cell selection log-likelihood
+ if self.config.average_logits_per_cell:
+ logits_per_cell, _ = reduce_mean(logits, cell_index)
+ logits = gather(logits_per_cell, cell_index)
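+ # Treat each token's selection as an independent Bernoulli variable over these logits.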
+ dist_per_token = tfp.distributions.Bernoulli(logits=logits)
+
+ # Compute cell selection loss per example.
+ selection_loss_per_example = None
+ if not self.config.select_one_column:
+ weight = tf.where(
+ labels == 0,
+ tf.ones_like(labels, dtype=tf.float32),
+ self.config.positive_label_weight * tf.ones_like(labels, dtype=tf.float32),
+ )
+ selection_loss_per_token = -dist_per_token.log_prob(labels) * weight
+ selection_loss_per_example = tf.reduce_sum(selection_loss_per_token * input_mask_float, axis=1) / (
+ tf.reduce_sum(input_mask_float, axis=1) + EPSILON_ZERO_DIVISION
+ )
+ else:
+ selection_loss_per_example, logits = _single_column_cell_selection_loss(
+ logits, column_logits, labels, cell_index, col_index, cell_mask
+ )
+ dist_per_token = tfp.distributions.Bernoulli(logits=logits)
+
+ # Supervised cell selection
+ if self.config.disable_per_token_loss:
+ pass
+ elif is_supervised:
+ total_loss += tf.reduce_mean(selection_loss_per_example)
+ else:
+ # For the not supervised case, do not assign loss for cell selection
+ total_loss += tf.reduce_mean(selection_loss_per_example * (1.0 - aggregate_mask))
+
+ # Semi-supervised regression loss and supervised loss for aggregations
+ if self.config.num_aggregation_labels > 0:
+ if is_supervised:
+ # Note that `aggregate_mask` is None if the setting is supervised.
+ if aggregation_labels is not None:
+ assert (
+ shape_list(labels)[0] == shape_list(aggregation_labels)[0]
+ ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)"
+ per_example_additional_loss = _calculate_aggregation_loss(
+ logits_aggregation,
+ aggregate_mask,
+ aggregation_labels,
+ self.config.use_answer_as_supervision,
+ self.config.num_aggregation_labels,
+ self.config.aggregation_loss_weight,
+ )
+ else:
+ raise ValueError(
+ "You have to specify aggregation labels in order to calculate the aggregation loss"
+ )
+ else:
+ aggregation_labels = tf.zeros(shape_list(labels)[0], dtype=tf.int32)
+ per_example_additional_loss = _calculate_aggregation_loss(
+ logits_aggregation,
+ aggregate_mask,
+ aggregation_labels,
+ self.config.use_answer_as_supervision,
+ self.config.num_aggregation_labels,
+ self.config.aggregation_loss_weight,
+ )
+
+ if self.config.use_answer_as_supervision:
+ if numeric_values is not None and numeric_values_scale is not None:
+ assert shape_list(numeric_values) == shape_list(numeric_values_scale)
+ # Add regression loss for numeric answers which require aggregation.
+ answer_loss, large_answer_loss_mask = _calculate_regression_loss(
+ float_answer,
+ aggregate_mask,
+ dist_per_token,
+ numeric_values,
+ numeric_values_scale,
+ table_mask_float,
+ logits_aggregation,
+ self.config,
+ )
+ per_example_additional_loss += answer_loss
+ # Zero loss for examples with answer_loss > cutoff.
+ per_example_additional_loss *= large_answer_loss_mask
+ else:
+ raise ValueError(
+ "You have to specify numeric values and numeric values scale in order to calculate the"
+ " regression loss"
+ )
+ total_loss += tf.reduce_mean(per_example_additional_loss)
+
+ else:
+ # if no label ids are provided, set them to zeros in order to properly compute logits
+ labels = tf.zeros_like(logits)
+ _, logits = _single_column_cell_selection_loss(
+ logits, column_logits, labels, cell_index, col_index, cell_mask
+ )
+ if not return_dict:
+ output = (logits, logits_aggregation) + outputs[2:]
+ return ((total_loss,) + output) if calculate_loss else output
+
+ return TFTableQuestionAnsweringOutput(
+ loss=total_loss if calculate_loss else None,
+ logits=logits,
+ logits_aggregation=logits_aggregation,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "tapas", None) is not None:
+ with tf.name_scope(self.tapas.name):
+ self.tapas.build(None)
+ if getattr(self, "compute_token_logits", None) is not None:
+ with tf.name_scope(self.compute_token_logits.name):
+ self.compute_token_logits.build(None)
+ if getattr(self, "compute_column_logits", None) is not None:
+ with tf.name_scope(self.compute_column_logits.name):
+ self.compute_column_logits.build(None)
+ if getattr(self, "aggregation_classifier", None) is not None:
+ with tf.name_scope(self.aggregation_classifier.name):
+ self.aggregation_classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table
+ entailment tasks, such as TabFact (Chen et al., 2020).
+ """,
+ TAPAS_START_DOCSTRING,
+)
+class TFTapasForSequenceClassification(TFTapasPreTrainedModel, TFSequenceClassificationLoss):
+ def __init__(self, config: TapasConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.tapas = TFTapasMainLayer(config, name="tapas")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob, name="dropout")
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @replace_return_docstrings(output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ token_type_ids: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: np.ndarray | tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called
+ "classification_class_index" in the original implementation.
+
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import AutoTokenizer, TapasForSequenceClassification
+ >>> import tensorflow as tf
+ >>> import pandas as pd
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact")
+ >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact")
+
+ >>> data = {
+ ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
+ ... "Age": ["56", "45", "59"],
+ ... "Number of movies": ["87", "53", "69"],
+ ... }
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = [
+ ... "There is only one actor who is 45 years old",
+ ... "There are 3 actors which played in more than 60 movies",
+ ... ]
+
+ >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="tf")
+ >>> labels = tf.convert_to_tensor([1, 0]) # 1 means entailed, 0 means refuted
+
+ >>> outputs = model(**inputs, labels=labels)
+ >>> loss = outputs.loss
+ >>> logits = outputs.logits
+ ```"""
+
+ outputs = self.tapas(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(inputs=pooled_output, training=training)
+ logits = self.classifier(inputs=pooled_output)
+ loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "tapas", None) is not None:
+ with tf.name_scope(self.tapas.name):
+ self.tapas.build(None)
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+""" TAPAS utilities."""
+
+
+class AverageApproximationFunction(str, enum.Enum):
+ RATIO = "ratio"
+ FIRST_ORDER = "first_order"
+ SECOND_ORDER = "second_order"
+
+
+# Beginning of everything related to segmented tensors
+
+
+class IndexMap(object):
+ """Index grouping entries within a tensor."""
+
+ def __init__(self, indices, num_segments, batch_dims=0):
+ """
+ Creates an index.
+
+ Args:
+ indices: Tensor of indices, same shape as `values`.
+ num_segments: Scalar tensor, the number of segments. All elements
+ in a batched segmented tensor must have the same number of segments (although many segments can be empty).
+ batch_dims: Python integer, the number of batch dimensions. The first
+ `batch_dims` dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch
+ elements are always distinct even if they have the same index.
+ """
+ self.indices = tf.convert_to_tensor(indices)
+ self.num_segments = tf.convert_to_tensor(num_segments)
+ self.batch_dims = batch_dims
+
+ def batch_shape(self):
+ return tf.shape(self.indices)[: self.batch_dims]
+
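+ # Illustrative sketch (added comment, not part of the original file): grouping six
+ # tokens of a single, un-batched example into three segments.
+ #   index = IndexMap(indices=tf.constant([0, 0, 1, 1, 2, 2]), num_segments=3, batch_dims=0)
+ #   index.batch_shape()  # empty shape, since there are no batch dimensions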
+
+class ProductIndexMap(IndexMap):
+ """The product of two indices."""
+
+ def __init__(self, outer_index, inner_index):
+ """
+ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the
+ intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows
+ and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation
+ combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to
+ `outer_index.num_segments` * `inner_index.num_segments`.
+
+ Args:
+ outer_index: IndexMap.
+ inner_index: IndexMap, must have the same shape as `outer_index`.
+ """
+ if outer_index.batch_dims != inner_index.batch_dims:
+ raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.")
+
+ super(ProductIndexMap, self).__init__(
+ indices=(
+ inner_index.indices
+ + outer_index.indices * tf.cast(inner_index.num_segments, inner_index.indices.dtype)
+ ),
+ num_segments=inner_index.num_segments * outer_index.num_segments,
+ batch_dims=inner_index.batch_dims,
+ )
+ self.outer_index = outer_index
+ self.inner_index = inner_index
+
+ def project_outer(self, index):
+ """Projects an index with the same index set onto the outer components."""
+ return IndexMap(
+ indices=tf.math.floordiv(index.indices, self.inner_index.num_segments),
+ num_segments=self.outer_index.num_segments,
+ batch_dims=index.batch_dims,
+ )
+
+ def project_inner(self, index):
+ """Projects an index with the same index set onto the inner components."""
+ return IndexMap(
+ indices=tf.math.floormod(index.indices, self.inner_index.num_segments),
+ num_segments=self.inner_index.num_segments,
+ batch_dims=index.batch_dims,
+ )
+
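+ # Illustrative sketch: for a 2 x 2 table whose four tokens are indexed by row
+ # [0, 0, 1, 1] and by column [0, 1, 0, 1], the product index assigns one segment per cell.
+ #   row_index = IndexMap(tf.constant([0, 0, 1, 1]), num_segments=2)
+ #   col_index = IndexMap(tf.constant([0, 1, 0, 1]), num_segments=2)
+ #   cell_index = ProductIndexMap(row_index, col_index)
+ #   cell_index.indices  # [0, 1, 2, 3], with num_segments == 4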
+
+def gather(values, index, name="segmented_gather"):
+ """
+ Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up
+ a value for that index in `values`. Two elements from the same segment always get assigned the same value.
+
+ Args:
+ values: [B1, ..., Bn, num_segments, V1, ...] Tensor with segment values.
+ index: [B1, ..., Bn, I1, ..., Ik] IndexMap.
+ name: Name for the TensorFlow operation.
+
+ Returns:
+ [B1, ..., Bn, I1, ..., Ik, V1, ...] Tensor with the gathered values.
+ """
+ return tf.gather(values, index.indices, batch_dims=index.batch_dims, name=name)
+
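+ # Illustrative sketch: looking up one value per segment for every token.
+ #   values = tf.constant([10.0, 20.0, 30.0])  # one value per segment
+ #   index = IndexMap(tf.constant([0, 0, 2, 1]), num_segments=3)
+ #   gather(values, index)  # [10., 10., 30., 20.]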
+
+def flatten(index, name="segmented_flatten"):
+ """
+ Flattens a batched index map to a 1d index map. This operation relabels the segments to keep batch elements
+ distinct. Batch element `k` (0-indexed) has its indices shifted by `k * num_segments`. The resulting IndexMap has
+ `num_segments` equal to the original `num_segments` multiplied by the number of elements in the batch.
+
+ Args:
+ index: IndexMap to flatten.
+ name: Name for the TensorFlow operation.
+
+ Returns:
+ The flattened IndexMap.
+ """
+ batch_size = tf.reduce_prod(index.batch_shape())
+ offset = tf.range(batch_size) * index.num_segments
+ offset = tf.reshape(offset, index.batch_shape())
+ for _ in range(index.batch_dims, index.indices.shape.rank):
+ offset = tf.expand_dims(offset, -1)
+
+ indices = tf.cast(offset, index.indices.dtype) + index.indices
+ return IndexMap(indices=tf.reshape(indices, [-1]), num_segments=index.num_segments * batch_size, batch_dims=0)
+
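+ # Illustrative sketch: a batch of two examples with two segments each is relabelled
+ # so that segments stay distinct across the batch.
+ #   index = IndexMap(tf.constant([[0, 0, 1], [0, 1, 1]]), num_segments=2, batch_dims=1)
+ #   flatten(index).indices  # [0, 0, 1, 2, 3, 3], with num_segments == 4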
+
+def range_index_map(batch_shape, num_segments, name="range_index_map"):
+ """
+ Constructs an index map equal to range(num_segments).
+
+ Args:
+ batch_shape (`tf.Tensor`):
+ Batch shape
+ num_segments (`int`):
+ Number of segments
+ name (`str`, *optional*, defaults to 'range_index_map'):
+ Name for the operation. Currently not used
+
+ Returns:
+ (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments).
+ """
+ batch_shape = tf.convert_to_tensor(batch_shape)
+ batch_shape.shape.assert_has_rank(1)
+ num_segments = tf.convert_to_tensor(num_segments)
+ num_segments.shape.assert_has_rank(0)
+
+ indices = tf.range(num_segments)
+ shape = tf.concat([tf.ones_like(batch_shape, dtype=tf.int32), tf.expand_dims(num_segments, axis=0)], axis=0)
+ indices = tf.reshape(indices, shape)
+ multiples = tf.concat([batch_shape, [1]], axis=0)
+ indices = tf.tile(indices, multiples)
+ return IndexMap(indices=indices, num_segments=num_segments, batch_dims=batch_shape.shape.as_list()[0])
+
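+ # Illustrative sketch: an index of batch shape (2,) whose rows both equal range(3).
+ #   range_index_map(tf.constant([2]), 3).indices  # [[0, 1, 2], [0, 1, 2]]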
+
+def _segment_reduce(values, index, segment_reduce_fn, name):
+ """
+ Applies a segment reduction segment-wise.
+
+ Args:
+ values (`tf.Tensor`):
+ Tensor with segment values.
+ index (`IndexMap`):
+ IndexMap.
+ segment_reduce_fn (`Callable`):
+ Segment reduction function to apply, e.g. `tf.math.unsorted_segment_sum` or `tf.math.unsorted_segment_mean`.
+ name (`str`):
+ Name for the operation. Currently not used
+
+ Returns:
+ A pair (output_values, output_index) where `output_values` contains the reduced segment values and
+ `output_index` is an IndexMap over the segments (with elements equal to range(num_segments)).
+ """
+ # Flatten the batch dimensions, as segments ops do not support batching.
+ # However if `values` has extra dimensions to the right keep them
+ # unflattened. Segmented ops support vector-valued operations.
+ flat_index = flatten(index)
+ vector_shape = tf.shape(values)[index.indices.shape.rank :]
+ flattened_shape = tf.concat([[-1], vector_shape], axis=0)
+ flat_values = tf.reshape(values, flattened_shape)
+ segment_means = segment_reduce_fn(
+ data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments
+ )
+
+ # Unflatten the values.
+ new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0)
+ output_values = tf.reshape(segment_means, new_shape)
+ output_index = range_index_map(index.batch_shape(), index.num_segments)
+ return output_values, output_index
+
+
+def reduce_mean(values, index, name="segmented_reduce_mean"):
+ """
+ Averages a tensor over its segments. Outputs 0 for empty segments. This operation computes the mean over segments,
+ with support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a mean of vectors
+ rather than scalars.
+ Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be
+ averaged.
+ index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.
+ name: Name for the TensorFlow ops.
+
+ Returns:
+ A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments,
+ V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, tf.math.unsorted_segment_mean, name)
+
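+ # Illustrative sketch: averaging four token values into two segment (e.g. cell) values.
+ #   values = tf.constant([[1.0, 2.0, 3.0, 4.0]])
+ #   index = IndexMap(tf.constant([[0, 0, 1, 1]]), num_segments=2, batch_dims=1)
+ #   reduce_mean(values, index)[0]  # [[1.5, 3.5]]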
+
+def reduce_sum(values, index, name="segmented_reduce_sum"):
+ """
+ Sums a tensor over its segments. Outputs 0 for empty segments. This operation computes the sum over segments, with
+ support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be a sum of vectors
+ rather than scalars.
+ Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values to be
+ summed.
+ index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.
+ name: Name for the TensorFlow ops.
+
+ Returns:
+ A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments,
+ V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, tf.math.unsorted_segment_sum, name)
+
+
+def reduce_max(values, index, name="segmented_reduce_max"):
+ """
+ Computes the maximum over segments, with support for:
+
+ - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices.
+ - Vectorization using the last dimension [V1, V2, ...]. If they are present the output will be an element-wise
+ maximum of vectors rather than scalars.
+ Only the middle dimensions [I1, ..., Ik] are reduced by the operation.
+
+ Args:
+ values: [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..] tensor of values over which
+ the maximum is taken.
+ index: IndexMap [B1, B2, ..., Bn, I1, .., Ik] index defining the segments.
+ name: Name for the TensorFlow ops.
+
+ Returns:
+ A pair (output_values, output_index) where `output_values` is a tensor of shape [B1, B2, ..., Bn, num_segments,
+ V1, V2, ..] and `index` is an IndexMap with shape [B1, B2, ..., Bn, num_segments].
+ """
+ return _segment_reduce(values, index, tf.math.unsorted_segment_max, name)
+
+
+def reduce_min(values, index, name="segmented_reduce_min"):
+ """Computes the minimum over segments."""
+ return _segment_reduce(values, index, tf.math.unsorted_segment_min, name)
+
+
+def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask):
+ """
+ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The
+ model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside
+ the selected column are never selected.
+
+ Args:
+ token_logits (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Tensor containing the logits per token.
+ column_logits (`tf.Tensor` of shape `(batch_size, max_num_cols)`):
+ Tensor containing the logits per column.
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Labels per token.
+ cell_index (`ProductIndexMap`):
+ Index that groups tokens into cells.
+ col_index (`IndexMap`):
+ Index that groups tokens into columns.
+ cell_mask (`tf.Tensor` of shape `(batch_size, max_num_rows * max_num_cols)`):
+ Mask for cells that exist in the table (i.e. that are not padding).
+
+ Returns:
+ selection_loss_per_example (`tf.Tensor` of shape `(batch_size,)`): Loss for each example. logits (`tf.Tensor`
+ of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single
+ column. Logits outside of the most likely column according to *column_logits* will be set to a very low value
+ (such that the probabilities are 0).
+ """
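+ # Roughly, per example (a sketch of the objective, not an exact transcription):
+ #   loss = -log p(column = target column)
+ #          - mean over cells of the target column of log p(cell label)
+ # where the cell term is dropped for examples in which no cell is selected.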
+ # First find the column we should select. We use the column with maximum
+ # number of selected cells.
+ labels_per_column, _ = reduce_sum(tf.cast(labels, tf.float32), col_index)
+ column_label = tf.argmax(labels_per_column, axis=-1, output_type=tf.int32)
+ # Check if there are no selected cells in the column. In that case the model
+ # should predict the special column id 0, which means "select nothing".
+ no_cell_selected = tf.equal(tf.reduce_max(labels_per_column, axis=-1), 0)
+ column_label = tf.where(no_cell_selected, tf.zeros_like(column_label), column_label)
+
+ column_dist = tfp.distributions.Categorical(logits=column_logits)
+ column_loss_per_example = -column_dist.log_prob(column_label)
+
+ # Reduce the labels and logits to per-cell from per-token.
+ logits_per_cell, _ = reduce_mean(token_logits, cell_index)
+ labels_per_cell, labels_index = reduce_max(tf.cast(labels, tf.int32), cell_index)
+
+ # Mask for the selected column.
+ column_id_for_cells = cell_index.project_inner(labels_index).indices
+ column_mask = tf.cast(tf.equal(column_id_for_cells, tf.expand_dims(column_label, axis=1)), tf.float32)
+
+ # Compute the log-likelihood for cells, but only for the selected column.
+ cell_dist = tfp.distributions.Bernoulli(logits=logits_per_cell)
+ cell_log_prob = cell_dist.log_prob(labels_per_cell)
+ cell_loss = -tf.reduce_sum(cell_log_prob * column_mask * cell_mask, axis=1)
+ # We need to normalize the loss by the number of cells in the column.
+ cell_loss /= tf.reduce_sum(column_mask * cell_mask, axis=1) + EPSILON_ZERO_DIVISION
+
+ selection_loss_per_example = column_loss_per_example
+ selection_loss_per_example += tf.where(no_cell_selected, tf.zeros_like(selection_loss_per_example), cell_loss)
+
+ # Set the probs outside the selected column (selected by the *model*)
+ # to 0. This ensures backwards compatibility with models that select
+ # cells from multiple columns.
+ selected_column_id = tf.argmax(column_logits, axis=-1, output_type=tf.int32)
+ selected_column_mask = tf.cast(
+ tf.equal(column_id_for_cells, tf.expand_dims(selected_column_id, axis=-1)), tf.float32
+ )
+ # Never select cells with the special column id 0.
+ selected_column_mask = tf.where(
+ tf.equal(column_id_for_cells, 0), tf.zeros_like(selected_column_mask), selected_column_mask
+ )
+ logits_per_cell += CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask)
+ logits = gather(logits_per_cell, cell_index)
+
+ return selection_loss_per_example, logits
+
+
+def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier):
+ """
+ Finds examples where the model should select cells with no aggregation.
+
+ Returns a mask that determines for which examples the model should select answers directly from the table, without
+ any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only
+ apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation
+ case. The ambiguous case is when the answer is a number that also appears in the table. In this case we use the
+ aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold
+ for this is the hyperparameter *cell_selection_preference*.
+
+ Args:
+ answer (`tf.Tensor` of shape `(batch_size, )`):
+ Answer for every example in the batch. Nan if there is no scalar answer.
+ pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`):
+ Output of the pooler (BertPooler) on top of the encoder layer.
+ cell_selection_preference (`float`):
+ Preference for cell selection in ambiguous cases.
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Labels per token.
+ aggregation_classifier (`keras.layers.Dense`):
+ Aggregation head of the model.
+
+ Returns:
+ aggregate_mask (`tf.Tensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation
+ functions.
+ """
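+ # Worked example with illustrative numbers: suppose the answer is a number that also
+ # appears in the table, the probability mass on the aggregation operators (everything
+ # except "no aggregation") is 0.2 and cell_selection_preference is 0.5. If the example
+ # also has token-level cell labels, then 0.2 <= 0.5 and it is resolved as pure cell
+ # selection (mask value 0); otherwise the mask stays at its initial value of 1.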
+ # tf.Tensor(batch_size,)
+ aggregate_mask_init = tf.cast(tf.logical_not(tf.math.is_nan(answer)), tf.float32)
+ logits_aggregation = aggregation_classifier(pooled_output)
+ dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
+ # Index 0 corresponds to "no aggregation".
+ aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1)
+ # Cell selection examples according to current model.
+ is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference
+ # Examples with non-empty cell selection supervision.
+ is_cell_supervision_available = tf.reduce_sum(labels, axis=1) > 0
+ aggregate_mask = tf.where(
+ tf.logical_and(is_pred_cell_selection, is_cell_supervision_available),
+ tf.zeros_like(aggregate_mask_init, dtype=tf.float32),
+ aggregate_mask_init,
+ )
+ aggregate_mask = tf.stop_gradient(aggregate_mask)
+ return aggregate_mask
+
+
+def _calculate_aggregation_loss_known(
+ logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
+):
+ """
+ Calculates aggregation loss when its type is known during training.
+
+ In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation"
+ should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting
+ where aggregation type is always known, standard cross entropy loss is accumulated for all examples
+
+ Args:
+ logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ aggregate_mask (`tf.Tensor` of shape `(batch_size, )`):
+ A mask set to 1 for examples that should use aggregation functions.
+ aggregation_labels (`tf.Tensor` of shape `(batch_size, )`):
+ Aggregation function id for every example in the batch.
+ use_answer_as_supervision (`bool`, *optional*):
+ Whether to use the answer as the only supervision for aggregation examples.
+ num_aggregation_labels (`int`, *optional*, defaults to 0):
+ The number of aggregation operators to predict.
+
+ Returns:
+ aggregation_loss_known (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during
+ training) per example.
+ """
+ if use_answer_as_supervision:
+ # Prepare "no aggregation" targets for cell selection examples.
+ target_aggregation = tf.zeros_like(aggregate_mask, dtype=tf.int32)
+ else:
+ # Use aggregation supervision as the target.
+ target_aggregation = aggregation_labels
+
+ one_hot_labels = tf.one_hot(target_aggregation, depth=num_aggregation_labels, dtype=tf.float32)
+ log_probs = tf.nn.log_softmax(logits_aggregation, axis=-1)
+
+ # [batch_size]
+ per_example_aggregation_intermediate = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
+ if use_answer_as_supervision:
+ # Accumulate loss only for examples requiring cell selection
+ # (no aggregation).
+ return per_example_aggregation_intermediate * (1 - aggregate_mask)
+ else:
+ return per_example_aggregation_intermediate
+
+
+def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask):
+ """
+ Calculates aggregation loss in the case of answer supervision.
+
+ Args:
+ logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ aggregate_mask (`tf.Tensor` of shape `(batch_size, )`):
+ A mask set to 1 for examples that should use aggregation functions
+
+ Returns:
+ aggregation_loss_unknown (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss (in case of answer
+ supervision) per example.
+ """
+ dist_aggregation = tfp.distributions.Categorical(logits=logits_aggregation)
+ # Index 0 corresponds to "no aggregation".
+ aggregation_ops_total_mass = tf.reduce_sum(dist_aggregation.probs_parameter()[:, 1:], axis=1)
+ # Predict some aggregation in case of an answer that needs aggregation.
+ # This increases the probability of all aggregation functions, in a way
+ # similar to MML, but without considering whether the function gives the
+ # correct answer.
+ return -tf.math.log(aggregation_ops_total_mass) * aggregate_mask
+
+
+def _calculate_aggregation_loss(
+ logits_aggregation,
+ aggregate_mask,
+ aggregation_labels,
+ use_answer_as_supervision,
+ num_aggregation_labels,
+ aggregation_loss_weight,
+):
+ """
+ Calculates the aggregation loss per example.
+
+ Args:
+ logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ aggregate_mask (`tf.Tensor` of shape `(batch_size, )`):
+ A mask set to 1 for examples that should use aggregation functions.
+ aggregation_labels (`tf.Tensor` of shape `(batch_size, )`):
+ Aggregation function id for every example in the batch.
+ use_answer_as_supervision (`bool`, *optional*):
+ Whether to use the answer as the only supervision for aggregation examples.
+ num_aggregation_labels (`int`, *optional*, defaults to 0):
+ The number of aggregation operators to predict.
+ aggregation_loss_weight (`float`, *optional*, defaults to 1.0):
+ Importance weight for the aggregation loss.
+
+ Returns:
+ aggregation_loss (`tf.Tensor` of shape `(batch_size,)`): Aggregation loss per example.
+ """
+ per_example_aggregation_loss = _calculate_aggregation_loss_known(
+ logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
+ )
+
+ if use_answer_as_supervision:
+ # Add aggregation loss for numeric answers that need aggregation.
+ per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask)
+ return aggregation_loss_weight * per_example_aggregation_loss
+
+
+def _calculate_expected_result(
+ dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
+):
+ """
+ Calculates the expected result given cell and aggregation probabilities.
+
+ Args:
+ dist_per_cell (`tfp.distributions.Bernoulli`):
+ Cell selection distribution for each cell.
+ numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):
+ Numeric values of every token. Nan for tokens which are not numeric values.
+ numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):
+ Scale of the numeric values of every token.
+ input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):
+ Mask for the table, without question tokens and table headers.
+ logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ config ([`TapasConfig`]):
+ Model configuration class with all the hyperparameters of the model
+
+ Returns:
+ expected_result (`tf.Tensor` of shape `(batch_size,)`): The expected result per example.
+ """
+ if config.use_gumbel_for_cells:
+ gumbel_dist = tfp.distributions.RelaxedBernoulli(
+ # The token logits were already divided by the temperature and used for
+ # computing cell selection errors, so we need to multiply by it again here
+ config.temperature,
+ logits=dist_per_cell.logits_parameter() * config.temperature,
+ )
+ scaled_probability_per_cell = gumbel_dist.sample()
+ else:
+ scaled_probability_per_cell = dist_per_cell.probs_parameter()
+
+ # [batch_size, seq_length]
+ scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
+ count_result = tf.reduce_sum(scaled_probability_per_cell, axis=1)
+ numeric_values_masked = tf.where(
+ tf.math.is_nan(numeric_values), tf.zeros_like(numeric_values), numeric_values
+ ) # Mask non-numeric table values to zero.
+ sum_result = tf.reduce_sum(scaled_probability_per_cell * numeric_values_masked, axis=1)
+ avg_approximation = config.average_approximation_function
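+ # For scaled cell probabilities p_i and masked numeric values v_i, the branches below
+ # approximate the average roughly as follows (sketch):
+ #   ratio:        avg ~ (sum_i p_i * v_i) / (sum_i p_i)
+ #   first_order:  avg ~ sum_i v_i * p_i / (1 + sum_j p_j - p_i)
+ #   second_order: like first_order, with an additional variance-based correction term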
+ if avg_approximation == AverageApproximationFunction.RATIO:
+ average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
+ elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
+ # The sum of all probabilities except those that correspond to other cells
+ ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1
+ average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell / ex, axis=1)
+ elif avg_approximation == AverageApproximationFunction.SECOND_ORDER:
+ # The sum of all probabilities except those that correspond to other cells
+ ex = tf.reduce_sum(scaled_probability_per_cell, axis=1, keepdims=True) - scaled_probability_per_cell + 1
+ pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell)
+ var = tf.reduce_sum(pointwise_var, axis=1, keepdims=True) - pointwise_var
+ multiplier = (var / tf.math.square(ex) + 1) / ex
+ average_result = tf.reduce_sum(numeric_values_masked * scaled_probability_per_cell * multiplier, axis=1)
+ else:
+ raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}")
+
+ if config.use_gumbel_for_aggregation:
+ gumbel_dist = tfp.distributions.RelaxedOneHotCategorical(
+ config.aggregation_temperature, logits=logits_aggregation[:, 1:]
+ )
+ # [batch_size, num_aggregation_labels - 1]
+ aggregation_op_only_probs = gumbel_dist.sample()
+ else:
+ # [batch_size, num_aggregation_labels - 1]
+ aggregation_op_only_probs = stable_softmax(logits_aggregation[:, 1:] / config.aggregation_temperature, axis=-1)
+ all_results = tf.concat(
+ [
+ tf.expand_dims(sum_result, axis=1),
+ tf.expand_dims(average_result, axis=1),
+ tf.expand_dims(count_result, axis=1),
+ ],
+ axis=1,
+ )
+ expected_result = tf.reduce_sum(all_results * aggregation_op_only_probs, axis=1)
+ return expected_result
+
+
+def _calculate_regression_loss(
+ answer,
+ aggregate_mask,
+ dist_per_cell,
+ numeric_values,
+ numeric_values_scale,
+ input_mask_float,
+ logits_aggregation,
+ config,
+):
+ """
+ Calculates the regression loss per example.
+
+ Args:
+ answer (`tf.Tensor` of shape `(batch_size,)`):
+ Answer for every example in the batch. Nan if there is no scalar answer.
+ aggregate_mask (`tf.Tensor` of shape `(batch_size,)`):
+ A mask set to 1 for examples that should use aggregation functions.
+ dist_per_cell (`tfp.distributions.Bernoulli`):
+ Cell selection distribution for each cell.
+ numeric_values (`tf.Tensor` of shape `(batch_size, seq_length)`):
+ Numeric values of every token. Nan for tokens which are not numeric values.
+ numeric_values_scale (`tf.Tensor` of shape `(batch_size, seq_length)`):
+ Scale of the numeric values of every token.
+ input_mask_float (`tf.Tensor` of shape `(batch_size, seq_length)`):
+ Mask for the table, without question tokens and table headers.
+ logits_aggregation (`tf.Tensor` of shape `(batch_size, num_aggregation_labels)`):
+ Logits per aggregation operation.
+ config ([`TapasConfig`]):
+ Model configuration class with all the parameters of the model
+
+ Returns:
+ per_example_answer_loss_scaled (`tf.Tensor` of shape `(batch_size,)`): Scaled answer loss for each example in
+ the batch. large_answer_loss_mask (`tf.Tensor` of shape `(batch_size,)`): A mask which is 1 for examples for
+ which their answer loss is larger than the answer_loss_cutoff.
+ """
+ # float32 (batch_size,)
+ expected_result = _calculate_expected_result(
+ dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config
+ )
+
+ # [batch_size]
+ answer_masked = tf.where(tf.math.is_nan(answer), tf.zeros_like(answer), answer)
+
+ if config.use_normalized_answer_loss:
+ normalizer = tf.stop_gradient(
+ tf.math.maximum(tf.math.abs(expected_result), tf.math.abs(answer_masked)) + EPSILON_ZERO_DIVISION
+ )
+ normalized_answer_masked = answer_masked / normalizer
+ normalized_expected_result = expected_result / normalizer
+ per_example_answer_loss = tf.compat.v1.losses.huber_loss(
+ normalized_answer_masked * aggregate_mask,
+ normalized_expected_result * aggregate_mask,
+ delta=tf.cast(1.0, tf.float32),
+ reduction=tf.losses.Reduction.NONE,
+ )
+ else:
+ per_example_answer_loss = tf.compat.v1.losses.huber_loss(
+ answer_masked * aggregate_mask,
+ expected_result * aggregate_mask,
+ delta=tf.cast(config.huber_loss_delta, tf.float32),
+ reduction=tf.losses.Reduction.NONE,
+ )
+ if config.answer_loss_cutoff is None:
+ large_answer_loss_mask = tf.ones_like(per_example_answer_loss, dtype=tf.float32)
+ else:
+ large_answer_loss_mask = tf.where(
+ per_example_answer_loss > config.answer_loss_cutoff,
+ tf.zeros_like(per_example_answer_loss, dtype=tf.float32),
+ tf.ones_like(per_example_answer_loss, dtype=tf.float32),
+ )
+ per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask)
+ return per_example_answer_loss_scaled, large_answer_loss_mask
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/tokenization_tapas.py b/venv/lib/python3.10/site-packages/transformers/models/tapas/tokenization_tapas.py
new file mode 100644
index 0000000000000000000000000000000000000000..23fbd5300ed583cc57986e6bca7e39d5d593dcd6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/tapas/tokenization_tapas.py
@@ -0,0 +1,2764 @@
+# coding=utf-8
+# Copyright 2020 Google Research and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Tokenization class for TAPAS model."""
+
+
+import collections
+import datetime
+import enum
+import itertools
+import math
+import os
+import re
+import unicodedata
+from dataclasses import dataclass
+from typing import Callable, Dict, Generator, List, Optional, Tuple, Union
+
+import numpy as np
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...tokenization_utils_base import (
+ ENCODE_KWARGS_DOCSTRING,
+ VERY_LARGE_INTEGER,
+ BatchEncoding,
+ EncodedInput,
+ PreTokenizedInput,
+ TextInput,
+)
+from ...utils import ExplicitEnum, PaddingStrategy, TensorType, add_end_docstrings, is_pandas_available, logging
+
+
+if is_pandas_available():
+ import pandas as pd
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+class TapasTruncationStrategy(ExplicitEnum):
+ """
+ Possible values for the `truncation` argument in [`~TapasTokenizer.__call__`]. Useful for tab-completion in an IDE.
+ """
+
+ DROP_ROWS_TO_FIT = "drop_rows_to_fit"
+ DO_NOT_TRUNCATE = "do_not_truncate"
+
+
+ TableValue = collections.namedtuple("TableValue", ["token", "column_id", "row_id"])
+
+
+@dataclass(frozen=True)
+class TokenCoordinates:
+ column_index: int
+ row_index: int
+ token_index: int
+
+
+@dataclass
+class TokenizedTable:
+ rows: List[List[List[str]]]
+ selected_tokens: List[TokenCoordinates]
+
+
+@dataclass(frozen=True)
+class SerializedExample:
+ tokens: List[str]
+ column_ids: List[int]
+ row_ids: List[int]
+ segment_ids: List[int]
+
+
+def _is_inner_wordpiece(token: str):
+ return token.startswith("##")
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING = r"""
+ add_special_tokens (`bool`, *optional*, defaults to `True`):
+ Whether or not to encode the sequences with the special tokens relative to their model.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
+ Activates and controls padding. Accepts the following values:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
+ lengths).
+ truncation (`bool`, `str` or [`TapasTruncationStrategy`], *optional*, defaults to `False`):
+ Activates and controls truncation. Accepts the following values:
+
+ - `True` or `'drop_rows_to_fit'`: Truncate to a maximum length specified with the argument `max_length`
+ or to the maximum acceptable input length for the model if that argument is not provided. This will
+ truncate row by row, removing rows from the table.
+ - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
+ greater than the model maximum admissible input size).
+ max_length (`int`, *optional*):
+ Controls the maximum length to use by one of the truncation/padding parameters.
+
+ If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
+ is required by one of the truncation/padding parameters. If the model has no specific maximum input
+ length (like XLNet) truncation/padding to a maximum length will be deactivated.
+ is_split_into_words (`bool`, *optional*, defaults to `False`):
+ Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True`, the
+ tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace)
+ which it will tokenize. This is useful for NER or token classification.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value. This is especially useful to enable
+ the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta).
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+"""
+
+
+class TapasTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a TAPAS tokenizer. Based on WordPiece. Flattens a table and one or more related sentences to be used by
+ TAPAS models.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods. [`TapasTokenizer`] creates several token type ids to
+ encode tabular structure. To be more precise, it adds 7 token type ids, in the following order: `segment_ids`,
+ `column_ids`, `row_ids`, `prev_labels`, `column_ranks`, `inv_column_ranks` and `numeric_relations`:
+
+ - segment_ids: indicate whether a token belongs to the question (0) or the table (1). 0 for special tokens and
+ padding.
+ - column_ids: indicate to which column of the table a token belongs (starting from 1). Is 0 for all question
+ tokens, special tokens and padding.
+ - row_ids: indicate to which row of the table a token belongs (starting from 1). Is 0 for all question tokens,
+ special tokens and padding. Tokens of column headers are also 0.
+ - prev_labels: indicate whether a token was (part of) an answer to the previous question (1) or not (0). Useful in
+ a conversational setup (such as SQA).
+ - column_ranks: indicate the rank of a table token relative to a column, if applicable. For example, if you have a
+ column "number of movies" with values 87, 53 and 69, then the column ranks of these tokens are 3, 1 and 2
+ respectively. 0 for all question tokens, special tokens and padding.
+ - inv_column_ranks: indicate the inverse rank of a table token relative to a column, if applicable. For example, if
+ you have a column "number of movies" with values 87, 53 and 69, then the inverse column ranks of these tokens are
+ 1, 3 and 2 respectively. 0 for all question tokens, special tokens and padding.
+ - numeric_relations: indicate numeric relations between the question and the tokens of the table. 0 for all
+ question tokens, special tokens and padding.
+
+ [`TapasTokenizer`] runs end-to-end tokenization on a table and associated sentences: punctuation splitting and
+ wordpiece.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ sep_token (`str`, *optional*, defaults to `"[SEP]"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"[PAD]"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ mask_token (`str`, *optional*, defaults to `"[MASK]"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ empty_token (`str`, *optional*, defaults to `"[EMPTY]"`):
+ The token used for empty cell values in a table. Empty cell values include "", "n/a", "nan" and "?".
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ cell_trim_length (`int`, *optional*, defaults to -1):
+ If > 0: Trim cells so that the length is <= this value. Also disables further cell trimming, should thus be
+ used with `truncation` set to `True`.
+ max_column_id (`int`, *optional*):
+ Max column id to extract.
+ max_row_id (`int`, *optional*):
+ Max row id to extract.
+ strip_column_names (`bool`, *optional*, defaults to `False`):
+ Whether to add empty strings instead of column names.
+ update_answer_coordinates (`bool`, *optional*, defaults to `False`):
+ Whether to recompute the answer coordinates from the answer text.
+ min_question_length (`int`, *optional*):
+ Minimum length of each question in terms of tokens (questions shorter than this will be skipped).
+ max_question_length (`int`, *optional*):
+ Maximum length of each question in terms of tokens (questions longer than this will be skipped).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ empty_token="[EMPTY]",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ cell_trim_length: int = -1,
+ max_column_id: int = None,
+ max_row_id: int = None,
+ strip_column_names: bool = False,
+ update_answer_coordinates: bool = False,
+ min_question_length=None,
+ max_question_length=None,
+ model_max_length: int = 512,
+ additional_special_tokens: Optional[List[str]] = None,
+ **kwargs,
+ ):
+ if not is_pandas_available():
+ raise ImportError("Pandas is required for the TAPAS tokenizer.")
+
+ if additional_special_tokens is not None:
+ if empty_token not in additional_special_tokens:
+ additional_special_tokens.append(empty_token)
+ else:
+ additional_special_tokens = [empty_token]
+
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = TapasTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ # Additional properties
+ self.cell_trim_length = cell_trim_length
+ self.max_column_id = (
+ max_column_id
+ if max_column_id is not None
+ else model_max_length
+ if model_max_length is not None
+ else VERY_LARGE_INTEGER
+ )
+ self.max_row_id = (
+ max_row_id
+ if max_row_id is not None
+ else model_max_length
+ if model_max_length is not None
+ else VERY_LARGE_INTEGER
+ )
+ self.strip_column_names = strip_column_names
+ self.update_answer_coordinates = update_answer_coordinates
+ self.min_question_length = min_question_length
+ self.max_question_length = max_question_length
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ empty_token=empty_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ cell_trim_length=cell_trim_length,
+ max_column_id=max_column_id,
+ max_row_id=max_row_id,
+ strip_column_names=strip_column_names,
+ update_answer_coordinates=update_answer_coordinates,
+ min_question_length=min_question_length,
+ max_question_length=max_question_length,
+ model_max_length=model_max_length,
+ additional_special_tokens=additional_special_tokens,
+ **kwargs,
+ )
+
+ @property
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _tokenize(self, text):
+ if format_text(text) == EMPTY_TEXT:
+ return [self.additional_special_tokens[0]]
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+ def create_attention_mask_from_sequences(self, query_ids: List[int], table_values: List[TableValue]) -> List[int]:
+ """
+ Creates the attention mask according to the query token IDs and a list of table values.
+
+ Args:
+ query_ids (`List[int]`): list of token IDs corresponding to the query.
+ table_values (`List[TableValue]`): list of table values, which are named tuples containing the
+ token value, the column ID and the row ID of said token.
+
+ Returns:
+ `List[int]`: List of ints containing the attention mask values.
+ """
+ return [1] * (1 + len(query_ids) + 1 + len(table_values))
+
+ def create_segment_token_type_ids_from_sequences(
+ self, query_ids: List[int], table_values: List[TableValue]
+ ) -> List[int]:
+ """
+ Creates the segment token type IDs according to the query token IDs and a list of table values.
+
+ Args:
+ query_ids (`List[int]`): list of token IDs corresponding to the query.
+ table_values (`List[TableValue]`): list of table values, which are named tuples containing the
+ token value, the column ID and the row ID of said token.
+
+ Returns:
+ `List[int]`: List of ints containing the segment token type IDs values.
+ """
+ table_ids = list(zip(*table_values))[0] if table_values else []
+ return [0] * (1 + len(query_ids) + 1) + [1] * len(table_ids)
+
+ def create_column_token_type_ids_from_sequences(
+ self, query_ids: List[int], table_values: List[TableValue]
+ ) -> List[int]:
+ """
+ Creates the column token type IDs according to the query token IDs and a list of table values.
+
+ Args:
+ query_ids (`List[int]`): list of token IDs corresponding to the query.
+ table_values (`List[TableValue]`): list of table values, which are named tuples containing the
+ token value, the column ID and the row ID of said token.
+
+ Returns:
+ `List[int]`: List of ints containing the column token type IDs values.
+ """
+ table_column_ids = list(zip(*table_values))[1] if table_values else []
+ return [0] * (1 + len(query_ids) + 1) + list(table_column_ids)
+
+ def create_row_token_type_ids_from_sequences(
+ self, query_ids: List[int], table_values: List[TableValue]
+ ) -> List[int]:
+ """
+ Creates the row token type IDs according to the query token IDs and a list of table values.
+
+ Args:
+ query_ids (`List[int]`): list of token IDs corresponding to the query.
+ table_values (`List[TableValue]`): list of table values, which are named tuples containing the
+ token value, the column ID and the row ID of said token.
+
+ Returns:
+ `List[int]`: List of ints containing the row token type IDs values.
+ """
+ table_row_ids = list(zip(*table_values))[2] if table_values else []
+ return [0] * (1 + len(query_ids) + 1) + list(table_row_ids)
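+
+ # Layout sketch for the helpers above: with a query of two token ids [q1, q2] and three
+ # table tokens, the encoded sequence is [CLS] q1 q2 [SEP] t1 t2 t3, so for example
+ #   create_attention_mask_from_sequences          -> [1, 1, 1, 1, 1, 1, 1]
+ #   create_segment_token_type_ids_from_sequences  -> [0, 0, 0, 0, 1, 1, 1]
+ # while the column and row variants replace the trailing ones with the column / row ids
+ # of the table tokens.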
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a question and flattened table for question answering or sequence classification tasks
+ by concatenating and adding special tokens.
+
+ Args:
+ token_ids_0 (`List[int]`): The ids of the question.
+ token_ids_1 (`List[int]`, *optional*): The ids of the flattened table.
+
+ Returns:
+ `List[int]`: The model input with special tokens.
+ """
+ if token_ids_1 is None:
+ raise ValueError("With TAPAS, you must provide both question IDs and table IDs.")
+
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + token_ids_1
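+
+ # Illustrative sketch: with question ids [5, 6] and flattened table ids [7, 8, 9], the
+ # result is [cls_token_id, 5, 6, sep_token_id, 7, 8, 9].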
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of question IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ List of flattened table IDs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ @add_end_docstrings(TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def __call__(
+ self,
+ table: "pd.DataFrame",
+ queries: Optional[
+ Union[
+ TextInput,
+ PreTokenizedInput,
+ EncodedInput,
+ List[TextInput],
+ List[PreTokenizedInput],
+ List[EncodedInput],
+ ]
+ ] = None,
+ answer_coordinates: Optional[Union[List[Tuple], List[List[Tuple]]]] = None,
+ answer_text: Optional[Union[List[TextInput], List[List[TextInput]]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Main method to tokenize and prepare for the model one or several sequence(s) related to a table.
+
+ Args:
+ table (`pd.DataFrame`):
+ Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
+ dataframe to convert it to string.
+ queries (`str` or `List[str]`):
+ Question or batch of questions related to a table to be encoded. Note that in case of a batch, all
+ questions must refer to the **same** table.
+ answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
+ Answer coordinates of each table-question pair in the batch. In case only a single table-question pair
+ is provided, then the answer_coordinates must be a single list of one or more tuples. Each tuple must
+ be a (row_index, column_index) pair. The first data row (not the column header row) has index 0. The
+ first column has index 0. In case a batch of table-question pairs is provided, then the
+ answer_coordinates must be a list of lists of tuples (each list corresponding to a single
+ table-question pair).
+ answer_text (`List[str]` or `List[List[str]]`, *optional*):
+ Answer text of each table-question pair in the batch. In case only a single table-question pair is
+ provided, then the answer_text must be a single list of one or more strings. Each string must be the
+ answer text of a corresponding answer coordinate. In case a batch of table-question pairs is provided,
+ then the answer_coordinates must be a list of lists of strings (each list corresponding to a single
+ table-question pair).
+ """
+ assert isinstance(table, pd.DataFrame), "Table must be of type pd.DataFrame"
+
+ # Input type checking for clearer error
+ valid_query = False
+
+ # Check that query has a valid type
+ if queries is None or isinstance(queries, str):
+ valid_query = True
+ elif isinstance(queries, (list, tuple)):
+ if len(queries) == 0 or isinstance(queries[0], str):
+ valid_query = True
+
+ if not valid_query:
+ raise ValueError(
+ "queries input must be of type `str` (single example), `List[str]` (batch or single pretokenized"
+ " example). "
+ )
+ is_batched = isinstance(queries, (list, tuple))
+
+ if is_batched:
+ return self.batch_encode_plus(
+ table=table,
+ queries=queries,
+ answer_coordinates=answer_coordinates,
+ answer_text=answer_text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+ else:
+ return self.encode_plus(
+ table=table,
+ query=queries,
+ answer_coordinates=answer_coordinates,
+ answer_text=answer_text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def batch_encode_plus(
+ self,
+ table: "pd.DataFrame",
+ queries: Optional[
+ Union[
+ List[TextInput],
+ List[PreTokenizedInput],
+ List[EncodedInput],
+ ]
+ ] = None,
+ answer_coordinates: Optional[List[List[Tuple]]] = None,
+ answer_text: Optional[List[List[TextInput]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepare a table and a list of strings for the model.
+
+
+
+ This method is deprecated, `__call__` should be used instead.
+
+
+
+ Args:
+ table (`pd.DataFrame`):
+ Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
+ dataframe to convert it to string.
+ queries (`List[str]`):
+ Batch of questions related to a table to be encoded. Note that all questions must refer to the **same**
+ table.
+ answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
+ Answer coordinates of each table-question pair in the batch. Each tuple must be a (row_index,
+ column_index) pair. The first data row (not the column header row) has index 0. The first column has
+ index 0. The answer_coordinates must be a list of lists of tuples (each list corresponding to a single
+ table-question pair).
+ answer_text (`List[str]` or `List[List[str]]`, *optional*):
+ Answer text of each table-question pair in the batch. In case a batch of table-question pairs is
+ provided, then the answer_coordinates must be a list of lists of strings (each list corresponding to a
+ single table-question pair). Each string must be the answer text of a corresponding answer coordinate.
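+
+ Example (an illustrative sketch only; the checkpoint name, table contents and answer coordinates below are
+ placeholders, and `__call__` is the preferred entry point since this method is deprecated):
+
+ ```python
+ >>> import pandas as pd
+ >>> from transformers import TapasTokenizer
+
+ >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio"], "Number of movies": ["87", "53"]}
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> queries = ["How many movies has Brad Pitt played in?", "How many movies has Leonardo Di Caprio played in?"]
+ >>> # one list of (row_index, column_index) tuples and one list of answer strings per question
+ >>> answer_coordinates = [[(0, 1)], [(1, 1)]]
+ >>> answer_text = [["87"], ["53"]]
+ >>> inputs = tokenizer.batch_encode_plus(
+ ...     table=table,
+ ...     queries=queries,
+ ...     answer_coordinates=answer_coordinates,
+ ...     answer_text=answer_text,
+ ...     padding="max_length",
+ ...     return_tensors="pt",
+ ... )
+ ```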
+ """
+ if return_token_type_ids is not None and not add_special_tokens:
+ raise ValueError(
+ "Asking to return token_type_ids while setting add_special_tokens to False "
+ "results in an undefined behavior. Please set add_special_tokens to True or "
+ "set return_token_type_ids to None."
+ )
+
+ if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text):
+ raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided")
+ elif answer_coordinates is None and answer_text is None:
+ answer_coordinates = answer_text = [None] * len(queries)
+
+ if "is_split_into_words" in kwargs:
+ raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.")
+
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ return self._batch_encode_plus(
+ table=table,
+ queries=queries,
+ answer_coordinates=answer_coordinates,
+ answer_text=answer_text,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _get_question_tokens(self, query):
+ """Tokenizes the query, taking into account the max and min question length."""
+
+ query_tokens = self.tokenize(query)
+ if self.max_question_length is not None and len(query_tokens) > self.max_question_length:
+ logger.warning("Skipping query as its tokens are longer than the max question length")
+ return "", []
+ if self.min_question_length is not None and len(query_tokens) < self.min_question_length:
+ logger.warning("Skipping query as its tokens are shorter than the min question length")
+ return "", []
+
+ return query, query_tokens
+
+ def _batch_encode_plus(
+ self,
+ table,
+ queries: Union[
+ List[TextInput],
+ List[PreTokenizedInput],
+ List[EncodedInput],
+ ],
+ answer_coordinates: Optional[List[List[Tuple]]] = None,
+ answer_text: Optional[List[List[TextInput]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = True,
+ return_attention_mask: Optional[bool] = None,
+ return_overflowing_tokens: bool = False,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ table_tokens = self._tokenize_table(table)
+
+ queries_tokens = []
+ for idx, query in enumerate(queries):
+ query, query_tokens = self._get_question_tokens(query)
+ queries[idx] = query
+ queries_tokens.append(query_tokens)
+
+ batch_outputs = self._batch_prepare_for_model(
+ table,
+ queries,
+ tokenized_table=table_tokens,
+ queries_tokens=queries_tokens,
+ answer_coordinates=answer_coordinates,
+ padding=padding,
+ truncation=truncation,
+ answer_text=answer_text,
+ add_special_tokens=add_special_tokens,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_overflowing_tokens=return_overflowing_tokens,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ return BatchEncoding(batch_outputs)
+
+ def _batch_prepare_for_model(
+ self,
+ raw_table: "pd.DataFrame",
+ raw_queries: Union[
+ List[TextInput],
+ List[PreTokenizedInput],
+ List[EncodedInput],
+ ],
+ tokenized_table: Optional[TokenizedTable] = None,
+ queries_tokens: Optional[List[List[str]]] = None,
+ answer_coordinates: Optional[List[List[Tuple]]] = None,
+ answer_text: Optional[List[List[TextInput]]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = True,
+ return_attention_mask: Optional[bool] = True,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ prepend_batch_axis: bool = False,
+ **kwargs,
+ ) -> BatchEncoding:
+ batch_outputs = {}
+
+ for index, example in enumerate(zip(raw_queries, queries_tokens, answer_coordinates, answer_text)):
+ raw_query, query_tokens, answer_coords, answer_txt = example
+ outputs = self.prepare_for_model(
+ raw_table,
+ raw_query,
+ tokenized_table=tokenized_table,
+ query_tokens=query_tokens,
+ answer_coordinates=answer_coords,
+ answer_text=answer_txt,
+ add_special_tokens=add_special_tokens,
+ padding=PaddingStrategy.DO_NOT_PAD.value, # we pad in batch afterwards
+ truncation=truncation,
+ max_length=max_length,
+ pad_to_multiple_of=None, # we pad in batch afterwards
+ return_attention_mask=False, # we pad in batch afterwards
+ return_token_type_ids=return_token_type_ids,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ return_tensors=None, # We convert the whole batch to tensors at the end
+ prepend_batch_axis=False,
+ verbose=verbose,
+ prev_answer_coordinates=answer_coordinates[index - 1] if index != 0 else None,
+ prev_answer_text=answer_text[index - 1] if index != 0 else None,
+ )
+
+ for key, value in outputs.items():
+ if key not in batch_outputs:
+ batch_outputs[key] = []
+ batch_outputs[key].append(value)
+
+ batch_outputs = self.pad(
+ batch_outputs,
+ padding=padding,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ batch_outputs = BatchEncoding(batch_outputs, tensor_type=return_tensors)
+
+ return batch_outputs
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING)
+ def encode(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[
+ Union[
+ TextInput,
+ PreTokenizedInput,
+ EncodedInput,
+ ]
+ ] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ **kwargs,
+ ) -> List[int]:
+ """
+ Prepare a table and a string for the model. This method does not return token type IDs, attention masks, etc.,
+ which are necessary for the model to work correctly. Only use this method if you want to build your processing
+ on your own, otherwise refer to `__call__`.
+
+ Args:
+ table (`pd.DataFrame`):
+ Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
+ dataframe to convert it to string.
+ query (`str` or `List[str]`):
+ Question related to a table to be encoded.
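+
+ Example (an illustrative sketch only; the checkpoint name and table contents are placeholders):
+
+ ```python
+ >>> import pandas as pd
+ >>> from transformers import TapasTokenizer
+
+ >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio"], "Number of movies": ["87", "53"]}
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> input_ids = tokenizer.encode(table, query="How many movies has Leonardo Di Caprio played in?")
+ ```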
+ """
+ encoded_inputs = self.encode_plus(
+ table,
+ query=query,
+ add_special_tokens=add_special_tokens,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ return_tensors=return_tensors,
+ **kwargs,
+ )
+
+ return encoded_inputs["input_ids"]
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def encode_plus(
+ self,
+ table: "pd.DataFrame",
+ query: Optional[
+ Union[
+ TextInput,
+ PreTokenizedInput,
+ EncodedInput,
+ ]
+ ] = None,
+ answer_coordinates: Optional[List[Tuple]] = None,
+ answer_text: Optional[List[TextInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepare a table and a string for the model.
+
+ Args:
+ table (`pd.DataFrame`):
+ Table containing tabular data. Note that all cell values must be text. Use *.astype(str)* on a Pandas
+ dataframe to convert it to string.
+ query (`str` or `List[str]`):
+ Question related to a table to be encoded.
+ answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
+ Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
+ list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
+ (not the column header row) has index 0. The first column has index 0.
+ answer_text (`List[str]` or `List[List[str]]`, *optional*):
+ Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
+ more strings. Each string must be the answer text of a corresponding answer coordinate.
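+
+ Example (an illustrative sketch only; the checkpoint name, table contents and answer coordinates are
+ placeholders):
+
+ ```python
+ >>> import pandas as pd
+ >>> from transformers import TapasTokenizer
+
+ >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio"], "Number of movies": ["87", "53"]}
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> inputs = tokenizer.encode_plus(
+ ...     table,
+ ...     query="How many movies has Brad Pitt played in?",
+ ...     answer_coordinates=[(0, 1)],
+ ...     answer_text=["87"],
+ ...     padding="max_length",
+ ...     return_tensors="pt",
+ ... )
+ ```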
+ """
+ if return_token_type_ids is not None and not add_special_tokens:
+ raise ValueError(
+ "Asking to return token_type_ids while setting add_special_tokens to False "
+ "results in an undefined behavior. Please set add_special_tokens to True or "
+ "set return_token_type_ids to None."
+ )
+
+ if (answer_coordinates and not answer_text) or (not answer_coordinates and answer_text):
+ raise ValueError("In case you provide answers, both answer_coordinates and answer_text should be provided")
+
+ if "is_split_into_words" in kwargs:
+ raise NotImplementedError("Currently TapasTokenizer only supports questions as strings.")
+
+ if return_offsets_mapping:
+ raise NotImplementedError(
+ "return_offset_mapping is not available when using Python tokenizers. "
+ "To use this feature, change your tokenizer to one deriving from "
+ "transformers.PreTrainedTokenizerFast."
+ )
+
+ return self._encode_plus(
+ table=table,
+ query=query,
+ answer_coordinates=answer_coordinates,
+ answer_text=answer_text,
+ add_special_tokens=add_special_tokens,
+ truncation=truncation,
+ padding=padding,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ return_token_type_ids=return_token_type_ids,
+ return_attention_mask=return_attention_mask,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_offsets_mapping=return_offsets_mapping,
+ return_length=return_length,
+ verbose=verbose,
+ **kwargs,
+ )
+
+ def _encode_plus(
+ self,
+ table: "pd.DataFrame",
+ query: Union[
+ TextInput,
+ PreTokenizedInput,
+ EncodedInput,
+ ],
+ answer_coordinates: Optional[List[Tuple]] = None,
+ answer_text: Optional[List[TextInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = True,
+ return_attention_mask: Optional[bool] = True,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ **kwargs,
+ ):
+ if query is None:
+ query = ""
+ logger.warning(
+ "TAPAS is a question answering model but you have not passed a query. Please be aware that the "
+ "model will probably not behave correctly."
+ )
+
+ table_tokens = self._tokenize_table(table)
+ query, query_tokens = self._get_question_tokens(query)
+
+ return self.prepare_for_model(
+ table,
+ query,
+ tokenized_table=table_tokens,
+ query_tokens=query_tokens,
+ answer_coordinates=answer_coordinates,
+ answer_text=answer_text,
+ add_special_tokens=add_special_tokens,
+ truncation=truncation,
+ padding=padding,
+ max_length=max_length,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_tensors=return_tensors,
+ prepend_batch_axis=True,
+ return_attention_mask=return_attention_mask,
+ return_token_type_ids=return_token_type_ids,
+ return_special_tokens_mask=return_special_tokens_mask,
+ return_length=return_length,
+ verbose=verbose,
+ )
+
+ @add_end_docstrings(ENCODE_KWARGS_DOCSTRING, TAPAS_ENCODE_PLUS_ADDITIONAL_KWARGS_DOCSTRING)
+ def prepare_for_model(
+ self,
+ raw_table: "pd.DataFrame",
+ raw_query: Union[
+ TextInput,
+ PreTokenizedInput,
+ EncodedInput,
+ ],
+ tokenized_table: Optional[TokenizedTable] = None,
+ query_tokens: Optional[TokenizedTable] = None,
+ answer_coordinates: Optional[List[Tuple]] = None,
+ answer_text: Optional[List[TextInput]] = None,
+ add_special_tokens: bool = True,
+ padding: Union[bool, str, PaddingStrategy] = False,
+ truncation: Union[bool, str, TapasTruncationStrategy] = False,
+ max_length: Optional[int] = None,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_token_type_ids: Optional[bool] = True,
+ return_attention_mask: Optional[bool] = True,
+ return_special_tokens_mask: bool = False,
+ return_offsets_mapping: bool = False,
+ return_length: bool = False,
+ verbose: bool = True,
+ prepend_batch_axis: bool = False,
+ **kwargs,
+ ) -> BatchEncoding:
+ """
+ Prepares a sequence of input ids so that it can be used by the model. It adds special tokens and truncates
+ sequences if overflowing, while taking into account the special tokens.
+
+ Args:
+ raw_table (`pd.DataFrame`):
+ The original table before any transformation (like tokenization) was applied to it.
+ raw_query (`TextInput` or `PreTokenizedInput` or `EncodedInput`):
+ The original query before any transformation (like tokenization) was applied to it.
+ tokenized_table (`TokenizedTable`):
+ The table after tokenization.
+ query_tokens (`List[str]`):
+ The query after tokenization.
+ answer_coordinates (`List[Tuple]` or `List[List[Tuple]]`, *optional*):
+ Answer coordinates of each table-question pair in the batch. The answer_coordinates must be a single
+ list of one or more tuples. Each tuple must be a (row_index, column_index) pair. The first data row
+ (not the column header row) has index 0. The first column has index 0.
+ answer_text (`List[str]` or `List[List[str]]`, *optional*):
+ Answer text of each table-question pair in the batch. The answer_text must be a single list of one or
+ more strings. Each string must be the answer text of a corresponding answer coordinate.
+ """
+ if isinstance(padding, bool):
+ if padding and (max_length is not None or pad_to_multiple_of is not None):
+ padding = PaddingStrategy.MAX_LENGTH
+ else:
+ padding = PaddingStrategy.DO_NOT_PAD
+ elif not isinstance(padding, PaddingStrategy):
+ padding = PaddingStrategy(padding)
+
+ if isinstance(truncation, bool):
+ if truncation:
+ truncation = TapasTruncationStrategy.DROP_ROWS_TO_FIT
+ else:
+ truncation = TapasTruncationStrategy.DO_NOT_TRUNCATE
+ elif not isinstance(truncation, TapasTruncationStrategy):
+ truncation = TapasTruncationStrategy(truncation)
+
+ encoded_inputs = {}
+
+ is_part_of_batch = False
+ prev_answer_coordinates, prev_answer_text = None, None
+ if "prev_answer_coordinates" in kwargs and "prev_answer_text" in kwargs:
+ is_part_of_batch = True
+ prev_answer_coordinates = kwargs["prev_answer_coordinates"]
+ prev_answer_text = kwargs["prev_answer_text"]
+
+ num_rows = self._get_num_rows(raw_table, truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE)
+ num_columns = self._get_num_columns(raw_table)
+ _, _, num_tokens = self._get_table_boundaries(tokenized_table)
+
+ if truncation != TapasTruncationStrategy.DO_NOT_TRUNCATE:
+ num_rows, num_tokens = self._get_truncated_table_rows(
+ query_tokens, tokenized_table, num_rows, num_columns, max_length, truncation_strategy=truncation
+ )
+ table_data = list(self._get_table_values(tokenized_table, num_columns, num_rows, num_tokens))
+
+ query_ids = self.convert_tokens_to_ids(query_tokens)
+ table_ids = list(zip(*table_data))[0] if len(table_data) > 0 else list(zip(*table_data))
+ table_ids = self.convert_tokens_to_ids(list(table_ids))
+
+ if "return_overflowing_tokens" in kwargs and kwargs["return_overflowing_tokens"]:
+ raise ValueError("TAPAS does not return overflowing tokens as it works on tables.")
+
+ if add_special_tokens:
+ input_ids = self.build_inputs_with_special_tokens(query_ids, table_ids)
+ else:
+ input_ids = query_ids + table_ids
+
+ if max_length is not None and len(input_ids) > max_length:
+ raise ValueError(
+ "Could not encode the query and table header given the maximum length. Encoding the query and table "
+ f"header results in a length of {len(input_ids)} which is higher than the max_length of {max_length}"
+ )
+
+ encoded_inputs["input_ids"] = input_ids
+
+ segment_ids = self.create_segment_token_type_ids_from_sequences(query_ids, table_data)
+ column_ids = self.create_column_token_type_ids_from_sequences(query_ids, table_data)
+ row_ids = self.create_row_token_type_ids_from_sequences(query_ids, table_data)
+ if not is_part_of_batch or (prev_answer_coordinates is None and prev_answer_text is None):
+ # simply set the prev_labels to zeros
+ prev_labels = [0] * len(row_ids)
+ else:
+ prev_labels = self.get_answer_ids(
+ column_ids, row_ids, table_data, prev_answer_text, prev_answer_coordinates
+ )
+
+ # FIRST: parse both the table and question in terms of numeric values
+
+ raw_table = add_numeric_table_values(raw_table)
+ raw_query = add_numeric_values_to_question(raw_query)
+
+ # SECOND: add numeric-related features (and not parse them in these functions):
+
+ column_ranks, inv_column_ranks = self._get_numeric_column_ranks(column_ids, row_ids, raw_table)
+ numeric_relations = self._get_numeric_relations(raw_query, column_ids, row_ids, raw_table)
+
+ # Load from model defaults
+ if return_token_type_ids is None:
+ return_token_type_ids = "token_type_ids" in self.model_input_names
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ if return_attention_mask:
+ attention_mask = self.create_attention_mask_from_sequences(query_ids, table_data)
+ encoded_inputs["attention_mask"] = attention_mask
+
+ if answer_coordinates is not None and answer_text is not None:
+ labels = self.get_answer_ids(column_ids, row_ids, table_data, answer_text, answer_coordinates)
+ numeric_values = self._get_numeric_values(raw_table, column_ids, row_ids)
+ numeric_values_scale = self._get_numeric_values_scale(raw_table, column_ids, row_ids)
+
+ encoded_inputs["labels"] = labels
+ encoded_inputs["numeric_values"] = numeric_values
+ encoded_inputs["numeric_values_scale"] = numeric_values_scale
+
+ if return_token_type_ids:
+ token_type_ids = [
+ segment_ids,
+ column_ids,
+ row_ids,
+ prev_labels,
+ column_ranks,
+ inv_column_ranks,
+ numeric_relations,
+ ]
+
+ token_type_ids = [list(ids) for ids in list(zip(*token_type_ids))]
+ encoded_inputs["token_type_ids"] = token_type_ids
+
+ if return_special_tokens_mask:
+ if add_special_tokens:
+ encoded_inputs["special_tokens_mask"] = self.get_special_tokens_mask(query_ids, table_ids)
+ else:
+ encoded_inputs["special_tokens_mask"] = [0] * len(input_ids)
+
+ # Check lengths
+ if max_length is None and len(encoded_inputs["input_ids"]) > self.model_max_length and verbose:
+ if not self.deprecation_warnings.get("sequence-length-is-longer-than-the-specified-maximum", False):
+ logger.warning(
+ "Token indices sequence length is longer than the specified maximum sequence length "
+ f"for this model ({len(encoded_inputs['input_ids'])} > {self.model_max_length}). Running this "
+ "sequence through the model will result in indexing errors."
+ )
+ self.deprecation_warnings["sequence-length-is-longer-than-the-specified-maximum"] = True
+
+ # Padding
+ if padding != PaddingStrategy.DO_NOT_PAD or return_attention_mask:
+ encoded_inputs = self.pad(
+ encoded_inputs,
+ max_length=max_length,
+ padding=padding.value,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ if return_length:
+ encoded_inputs["length"] = len(encoded_inputs["input_ids"])
+
+ batch_outputs = BatchEncoding(
+ encoded_inputs, tensor_type=return_tensors, prepend_batch_axis=prepend_batch_axis
+ )
+
+ return batch_outputs
+
+ def _get_truncated_table_rows(
+ self,
+ query_tokens: List[str],
+ tokenized_table: TokenizedTable,
+ num_rows: int,
+ num_columns: int,
+ max_length: int,
+ truncation_strategy: Union[str, TapasTruncationStrategy],
+ ) -> Tuple[int, int]:
+ """
+ Truncates a sequence pair in-place following the strategy.
+
+ Args:
+ query_tokens (`List[str]`):
+ List of strings corresponding to the tokenized query.
+ tokenized_table (`TokenizedTable`):
+ Tokenized table
+ num_rows (`int`):
+ Total number of table rows
+ num_columns (`int`):
+ Total number of table columns
+ max_length (`int`):
+ Total maximum length.
+ truncation_strategy (`str` or [`TapasTruncationStrategy`]):
+ Truncation strategy to use. Seeing as this method should only be called when truncating, the only
+ available strategy is the `"drop_rows_to_fit"` strategy.
+
+ Returns:
+ `Tuple(int, int)`: tuple containing the number of rows after truncation, and the number of tokens available
+ for each table element.
+ """
+ if not isinstance(truncation_strategy, TapasTruncationStrategy):
+ truncation_strategy = TapasTruncationStrategy(truncation_strategy)
+
+ if max_length is None:
+ max_length = self.model_max_length
+
+ if truncation_strategy == TapasTruncationStrategy.DROP_ROWS_TO_FIT:
+ while True:
+ num_tokens = self._get_max_num_tokens(
+ query_tokens, tokenized_table, num_rows=num_rows, num_columns=num_columns, max_length=max_length
+ )
+
+ if num_tokens is not None:
+ # We could fit the table.
+ break
+
+ # Try to drop a row to fit the table.
+ num_rows -= 1
+
+ if num_rows < 1:
+ break
+ elif truncation_strategy != TapasTruncationStrategy.DO_NOT_TRUNCATE:
+ raise ValueError(f"Unknown truncation strategy {truncation_strategy}.")
+
+ return num_rows, num_tokens or 1
+
+ def _tokenize_table(
+ self,
+ table=None,
+ ):
+ """
+ Tokenizes column headers and cell texts of a table.
+
+ Args:
+ table (`pd.DataFrame`):
+ Table.
+
+ Returns:
+ `TokenizedTable`: TokenizedTable object.
+ """
+ tokenized_rows = []
+ tokenized_row = []
+ # tokenize column headers
+ for column in table:
+ if self.strip_column_names:
+ tokenized_row.append(self.tokenize(""))
+ else:
+ tokenized_row.append(self.tokenize(column))
+ tokenized_rows.append(tokenized_row)
+
+ # tokenize cell values
+ for idx, row in table.iterrows():
+ tokenized_row = []
+ for cell in row:
+ tokenized_row.append(self.tokenize(cell))
+ tokenized_rows.append(tokenized_row)
+
+ token_coordinates = []
+ for row_index, row in enumerate(tokenized_rows):
+ for column_index, cell in enumerate(row):
+ for token_index, _ in enumerate(cell):
+ token_coordinates.append(
+ TokenCoordinates(
+ row_index=row_index,
+ column_index=column_index,
+ token_index=token_index,
+ )
+ )
+
+ return TokenizedTable(
+ rows=tokenized_rows,
+ selected_tokens=token_coordinates,
+ )
+
+ def _question_encoding_cost(self, question_tokens):
+ # Two extra spots of SEP and CLS.
+ return len(question_tokens) + 2
+
+ def _get_token_budget(self, question_tokens, max_length=None):
+ """
+ Computes the number of tokens left for the table after tokenizing a question, taking into account the max
+ sequence length of the model.
+
+ Args:
+ question_tokens (`List[str]`):
+ List of question tokens.
+
+ Returns:
+ `int`: the number of tokens left for the table, given the model max length.
+ """
+ return (max_length if max_length is not None else self.model_max_length) - self._question_encoding_cost(
+ question_tokens
+ )
+
+ def _get_table_values(self, table, num_columns, num_rows, num_tokens) -> Generator[TableValue, None, None]:
+ """Iterates over partial table and returns token, column and row indexes."""
+ for tc in table.selected_tokens:
+ # First row is header row.
+ if tc.row_index >= num_rows + 1:
+ continue
+ if tc.column_index >= num_columns:
+ continue
+ cell = table.rows[tc.row_index][tc.column_index]
+ token = cell[tc.token_index]
+ word_begin_index = tc.token_index
+ # Don't add partial words. Find the starting word piece and check if it
+ # fits in the token budget.
+ while word_begin_index >= 0 and _is_inner_wordpiece(cell[word_begin_index]):
+ word_begin_index -= 1
+ if word_begin_index >= num_tokens:
+ continue
+ yield TableValue(token, tc.column_index + 1, tc.row_index)
+
+ def _get_table_boundaries(self, table):
+ """Return maximal number of rows, columns and tokens."""
+ max_num_tokens = 0
+ max_num_columns = 0
+ max_num_rows = 0
+ for tc in table.selected_tokens:
+ max_num_columns = max(max_num_columns, tc.column_index + 1)
+ max_num_rows = max(max_num_rows, tc.row_index + 1)
+ max_num_tokens = max(max_num_tokens, tc.token_index + 1)
+ max_num_columns = min(self.max_column_id, max_num_columns)
+ max_num_rows = min(self.max_row_id, max_num_rows)
+ return max_num_rows, max_num_columns, max_num_tokens
+
+ def _get_table_cost(self, table, num_columns, num_rows, num_tokens):
+ return sum(1 for _ in self._get_table_values(table, num_columns, num_rows, num_tokens))
+
+ def _get_max_num_tokens(self, question_tokens, tokenized_table, num_columns, num_rows, max_length):
+ """Computes max number of tokens that can be squeezed into the budget."""
+ token_budget = self._get_token_budget(question_tokens, max_length)
+ _, _, max_num_tokens = self._get_table_boundaries(tokenized_table)
+ if self.cell_trim_length >= 0 and max_num_tokens > self.cell_trim_length:
+ max_num_tokens = self.cell_trim_length
+ num_tokens = 0
+ for num_tokens in range(max_num_tokens + 1):
+ cost = self._get_table_cost(tokenized_table, num_columns, num_rows, num_tokens + 1)
+ if cost > token_budget:
+ break
+ if num_tokens < max_num_tokens:
+ if self.cell_trim_length >= 0:
+ # We don't allow dynamic trimming if a cell_trim_length is set.
+ return None
+ if num_tokens == 0:
+ return None
+ return num_tokens
+
+ def _get_num_columns(self, table):
+ num_columns = table.shape[1]
+ if num_columns >= self.max_column_id:
+ raise ValueError("Too many columns")
+ return num_columns
+
+ def _get_num_rows(self, table, drop_rows_to_fit):
+ num_rows = table.shape[0]
+ if num_rows >= self.max_row_id:
+ if drop_rows_to_fit:
+ num_rows = self.max_row_id - 1
+ else:
+ raise ValueError("Too many rows")
+ return num_rows
+
+ def _serialize_text(self, question_tokens):
+ """Serializes texts in index arrays."""
+ tokens = []
+ segment_ids = []
+ column_ids = []
+ row_ids = []
+
+ # add [CLS] token at the beginning
+ tokens.append(self.cls_token)
+ segment_ids.append(0)
+ column_ids.append(0)
+ row_ids.append(0)
+
+ for token in question_tokens:
+ tokens.append(token)
+ segment_ids.append(0)
+ column_ids.append(0)
+ row_ids.append(0)
+
+ return tokens, segment_ids, column_ids, row_ids
+
+ def _serialize(
+ self,
+ question_tokens,
+ table,
+ num_columns,
+ num_rows,
+ num_tokens,
+ ):
+ """Serializes table and text."""
+ tokens, segment_ids, column_ids, row_ids = self._serialize_text(question_tokens)
+
+ # add [SEP] token between question and table tokens
+ tokens.append(self.sep_token)
+ segment_ids.append(0)
+ column_ids.append(0)
+ row_ids.append(0)
+
+ for token, column_id, row_id in self._get_table_values(table, num_columns, num_rows, num_tokens):
+ tokens.append(token)
+ segment_ids.append(1)
+ column_ids.append(column_id)
+ row_ids.append(row_id)
+
+ return SerializedExample(
+ tokens=tokens,
+ segment_ids=segment_ids,
+ column_ids=column_ids,
+ row_ids=row_ids,
+ )
+
+ def _get_column_values(self, table, col_index):
+ table_numeric_values = {}
+ for row_index, row in table.iterrows():
+ cell = row[col_index]
+ if cell.numeric_value is not None:
+ table_numeric_values[row_index] = cell.numeric_value
+ return table_numeric_values
+
+ def _get_cell_token_indexes(self, column_ids, row_ids, column_id, row_id):
+ for index in range(len(column_ids)):
+ if column_ids[index] - 1 == column_id and row_ids[index] - 1 == row_id:
+ yield index
+
+ def _get_numeric_column_ranks(self, column_ids, row_ids, table):
+ """Returns column ranks for all numeric columns."""
+
+ ranks = [0] * len(column_ids)
+ inv_ranks = [0] * len(column_ids)
+
+ # original code from tf_example_utils.py of the original implementation
+ if table is not None:
+ for col_index in range(len(table.columns)):
+ table_numeric_values = self._get_column_values(table, col_index)
+
+ if not table_numeric_values:
+ continue
+
+ try:
+ key_fn = get_numeric_sort_key_fn(table_numeric_values.values())
+ except ValueError:
+ continue
+
+ table_numeric_values = {row_index: key_fn(value) for row_index, value in table_numeric_values.items()}
+
+ table_numeric_values_inv = collections.defaultdict(list)
+ for row_index, value in table_numeric_values.items():
+ table_numeric_values_inv[value].append(row_index)
+
+ unique_values = sorted(table_numeric_values_inv.keys())
+
+ for rank, value in enumerate(unique_values):
+ for row_index in table_numeric_values_inv[value]:
+ for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index):
+ ranks[index] = rank + 1
+ inv_ranks[index] = len(unique_values) - rank
+
+ return ranks, inv_ranks
+
+ def _get_numeric_sort_key_fn(self, table_numeric_values, value):
+ """
+ Returns the sort key function for comparing `value` to the table values. The returned function is a suitable
+ input for the `key` parameter of `sorted()`. See number_annotation_utils._get_numeric_sort_key_fn for details.
+
+ Args:
+ table_numeric_values: Numeric values of a column
+ value: Numeric value in the question
+
+ Returns:
+ A key function to compare column and question values.
+ """
+ if not table_numeric_values:
+ return None
+ all_values = list(table_numeric_values.values())
+ all_values.append(value)
+ try:
+ return get_numeric_sort_key_fn(all_values)
+ except ValueError:
+ return None
+
+ def _get_numeric_relations(self, question, column_ids, row_ids, table):
+ """
+ Returns numeric relations embeddings
+
+ Args:
+ question: Question object.
+ column_ids: Maps word piece position to column id.
+ row_ids: Maps word piece position to row id.
+ table: The table containing the numeric cell values.
+ """
+
+ numeric_relations = [0] * len(column_ids)
+
+ # first, we add any numeric value spans to the question:
+ # Create a dictionary that maps a table cell to the set of all relations
+ # this cell has with any value in the question.
+ cell_indices_to_relations = collections.defaultdict(set)
+ if question is not None and table is not None:
+ for numeric_value_span in question.numeric_spans:
+ for value in numeric_value_span.values:
+ for column_index in range(len(table.columns)):
+ table_numeric_values = self._get_column_values(table, column_index)
+ sort_key_fn = self._get_numeric_sort_key_fn(table_numeric_values, value)
+ if sort_key_fn is None:
+ continue
+ for row_index, cell_value in table_numeric_values.items():
+ relation = get_numeric_relation(value, cell_value, sort_key_fn)
+ if relation is not None:
+ cell_indices_to_relations[column_index, row_index].add(relation)
+
+ # For each cell add a special feature for all its word pieces.
+ for (column_index, row_index), relations in cell_indices_to_relations.items():
+ relation_set_index = 0
+ for relation in relations:
+ assert relation.value >= Relation.EQ.value
+ relation_set_index += 2 ** (relation.value - Relation.EQ.value)
+ for cell_token_index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index):
+ numeric_relations[cell_token_index] = relation_set_index
+
+ return numeric_relations
+
+ def _get_numeric_values(self, table, column_ids, row_ids):
+ """Returns numeric values for computation of answer loss."""
+
+ numeric_values = [float("nan")] * len(column_ids)
+
+ if table is not None:
+ num_rows = table.shape[0]
+ num_columns = table.shape[1]
+
+ for col_index in range(num_columns):
+ for row_index in range(num_rows):
+ numeric_value = table.iloc[row_index, col_index].numeric_value
+ if numeric_value is not None:
+ if numeric_value.float_value is None:
+ continue
+ float_value = numeric_value.float_value
+ if float_value == float("inf"):
+ continue
+ for index in self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index):
+ numeric_values[index] = float_value
+
+ return numeric_values
+
+ def _get_numeric_values_scale(self, table, column_ids, row_ids):
+ """Returns a scale to each token to down weigh the value of long words."""
+
+ numeric_values_scale = [1.0] * len(column_ids)
+
+ if table is None:
+ return numeric_values_scale
+
+ num_rows = table.shape[0]
+ num_columns = table.shape[1]
+
+ for col_index in range(num_columns):
+ for row_index in range(num_rows):
+ indices = list(self._get_cell_token_indexes(column_ids, row_ids, col_index, row_index))
+ num_indices = len(indices)
+ if num_indices > 1:
+ for index in indices:
+ numeric_values_scale[index] = float(num_indices)
+
+ return numeric_values_scale
+
+ def _pad_to_seq_length(self, inputs):
+ while len(inputs) > self.model_max_length:
+ inputs.pop()
+ while len(inputs) < self.model_max_length:
+ inputs.append(0)
+
+ def _get_all_answer_ids_from_coordinates(
+ self,
+ column_ids,
+ row_ids,
+ answers_list,
+ ):
+ """Maps lists of answer coordinates to token indexes."""
+ answer_ids = [0] * len(column_ids)
+ found_answers = set()
+ all_answers = set()
+ for answers in answers_list:
+ column_index, row_index = answers
+ all_answers.add((column_index, row_index))
+ for index in self._get_cell_token_indexes(column_ids, row_ids, column_index, row_index):
+ found_answers.add((column_index, row_index))
+ answer_ids[index] = 1
+
+ missing_count = len(all_answers) - len(found_answers)
+ return answer_ids, missing_count
+
+ def _get_all_answer_ids(self, column_ids, row_ids, answer_coordinates):
+ """
+ Maps answer coordinates of a question to token indexes.
+
+ In the SQA format (TSV), the coordinates are given as (row, column) tuples. Here, we first swap them to
+ (column, row) format before calling _get_all_answer_ids_from_coordinates.
+ """
+
+ def _to_coordinates(answer_coordinates_question):
+ return [(coords[1], coords[0]) for coords in answer_coordinates_question]
+
+ return self._get_all_answer_ids_from_coordinates(
+ column_ids, row_ids, answers_list=(_to_coordinates(answer_coordinates))
+ )
+
+ def _find_tokens(self, text, segment):
+ """Return start index of segment in text or None."""
+ logging.info(f"text: {text} {segment}")
+ for index in range(1 + len(text) - len(segment)):
+ for seg_index, seg_token in enumerate(segment):
+ if text[index + seg_index].piece != seg_token.piece:
+ break
+ else:
+ return index
+ return None
+
+ def _find_answer_coordinates_from_answer_text(
+ self,
+ tokenized_table,
+ answer_text,
+ ):
+ """Returns all occurrences of answer_text in the table."""
+ logging.info(f"answer text: {answer_text}")
+ for row_index, row in enumerate(tokenized_table.rows):
+ if row_index == 0:
+ # We don't search for answers in the header.
+ continue
+ for col_index, cell in enumerate(row):
+ token_index = self._find_tokens(cell, answer_text)
+ if token_index is not None:
+ yield TokenCoordinates(
+ row_index=row_index,
+ column_index=col_index,
+ token_index=token_index,
+ )
+
+ def _find_answer_ids_from_answer_texts(
+ self,
+ column_ids,
+ row_ids,
+ tokenized_table,
+ answer_texts,
+ ):
+ """Maps question with answer texts to the first matching token indexes."""
+ answer_ids = [0] * len(column_ids)
+ for answer_text in answer_texts:
+ for coordinates in self._find_answer_coordinates_from_answer_text(
+ tokenized_table,
+ answer_text,
+ ):
+ # Maps answer coordinates to indexes; this can fail if tokens / rows have
+ # been pruned.
+ indexes = list(
+ self._get_cell_token_indexes(
+ column_ids,
+ row_ids,
+ column_id=coordinates.column_index,
+ row_id=coordinates.row_index - 1,
+ )
+ )
+ indexes.sort()
+ coordinate_answer_ids = []
+ if indexes:
+ begin_index = coordinates.token_index + indexes[0]
+ end_index = begin_index + len(answer_text)
+ for index in indexes:
+ if index >= begin_index and index < end_index:
+ coordinate_answer_ids.append(index)
+ if len(coordinate_answer_ids) == len(answer_text):
+ for index in coordinate_answer_ids:
+ answer_ids[index] = 1
+ break
+ return answer_ids
+
+ def _get_answer_ids(self, column_ids, row_ids, answer_coordinates):
+ """Maps answer coordinates of a question to token indexes."""
+ answer_ids, missing_count = self._get_all_answer_ids(column_ids, row_ids, answer_coordinates)
+
+ if missing_count:
+ raise ValueError("Couldn't find all answers")
+ return answer_ids
+
+ def get_answer_ids(self, column_ids, row_ids, tokenized_table, answer_texts_question, answer_coordinates_question):
+ if self.update_answer_coordinates:
+ return self._find_answer_ids_from_answer_texts(
+ column_ids,
+ row_ids,
+ tokenized_table,
+ answer_texts=[self.tokenize(at) for at in answer_texts_question],
+ )
+ return self._get_answer_ids(column_ids, row_ids, answer_coordinates_question)
+
+ def _pad(
+ self,
+ encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
+ max_length: Optional[int] = None,
+ padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
+ pad_to_multiple_of: Optional[int] = None,
+ return_attention_mask: Optional[bool] = None,
+ ) -> dict:
+ """
+ Pad encoded inputs (on left/right and up to predefined length or max length in the batch)
+
+ Args:
+ encoded_inputs:
+ Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`).
+ max_length: maximum length of the returned list and optionally padding length (see below).
+ Will truncate by taking into account the special tokens.
+ padding_strategy: PaddingStrategy to use for padding.
+
+ - PaddingStrategy.LONGEST: Pad to the longest sequence in the batch
+ - PaddingStrategy.MAX_LENGTH: Pad to the max length
+ - PaddingStrategy.DO_NOT_PAD: Do not pad (default)
+ The tokenizer padding sides are defined in self.padding_side:
+
+ - 'left': pads on the left of the sequences
+ - 'right': pads on the right of the sequences
+ pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value.
+ This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta).
+ return_attention_mask:
+ (optional) Set to False to avoid returning attention mask (default: set to model specifics)
+ """
+ # Load from model defaults
+ if return_attention_mask is None:
+ return_attention_mask = "attention_mask" in self.model_input_names
+
+ if padding_strategy == PaddingStrategy.LONGEST:
+ max_length = len(encoded_inputs["input_ids"])
+
+ if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
+ max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
+
+ needs_to_be_padded = (
+ padding_strategy != PaddingStrategy.DO_NOT_PAD and len(encoded_inputs["input_ids"]) != max_length
+ )
+
+ # Initialize attention mask if not present.
+ if return_attention_mask and "attention_mask" not in encoded_inputs:
+ encoded_inputs["attention_mask"] = [1] * len(encoded_inputs["input_ids"])
+
+ if needs_to_be_padded:
+ difference = max_length - len(encoded_inputs["input_ids"])
+ if self.padding_side == "right":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = (
+ encoded_inputs["token_type_ids"] + [[self.pad_token_type_id] * 7] * difference
+ )
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = encoded_inputs["labels"] + [0] * difference
+ if "numeric_values" in encoded_inputs:
+ encoded_inputs["numeric_values"] = encoded_inputs["numeric_values"] + [float("nan")] * difference
+ if "numeric_values_scale" in encoded_inputs:
+ encoded_inputs["numeric_values_scale"] = (
+ encoded_inputs["numeric_values_scale"] + [1.0] * difference
+ )
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference
+ encoded_inputs["input_ids"] = encoded_inputs["input_ids"] + [self.pad_token_id] * difference
+ elif self.padding_side == "left":
+ if return_attention_mask:
+ encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"]
+ if "token_type_ids" in encoded_inputs:
+ encoded_inputs["token_type_ids"] = [[self.pad_token_type_id] * 7] * difference + encoded_inputs[
+ "token_type_ids"
+ ]
+ if "labels" in encoded_inputs:
+ encoded_inputs["labels"] = [0] * difference + encoded_inputs["labels"]
+ if "numeric_values" in encoded_inputs:
+ encoded_inputs["numeric_values"] = [float("nan")] * difference + encoded_inputs["numeric_values"]
+ if "numeric_values_scale" in encoded_inputs:
+ encoded_inputs["numeric_values_scale"] = [1.0] * difference + encoded_inputs[
+ "numeric_values_scale"
+ ]
+ if "special_tokens_mask" in encoded_inputs:
+ encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"]
+ encoded_inputs["input_ids"] = [self.pad_token_id] * difference + encoded_inputs["input_ids"]
+ else:
+ raise ValueError("Invalid padding strategy:" + str(self.padding_side))
+
+ return encoded_inputs
+
+ # Everything related to converting logits to predictions
+
+ def _get_cell_token_probs(self, probabilities, segment_ids, row_ids, column_ids):
+ for i, p in enumerate(probabilities):
+ segment_id = segment_ids[i]
+ col = column_ids[i] - 1
+ row = row_ids[i] - 1
+ if col >= 0 and row >= 0 and segment_id == 1:
+ yield i, p
+
+ def _get_mean_cell_probs(self, probabilities, segment_ids, row_ids, column_ids):
+ """Computes average probability per cell, aggregating over tokens."""
+ coords_to_probs = collections.defaultdict(list)
+ for i, prob in self._get_cell_token_probs(probabilities, segment_ids, row_ids, column_ids):
+ col = column_ids[i] - 1
+ row = row_ids[i] - 1
+ coords_to_probs[(col, row)].append(prob)
+ return {coords: np.array(cell_probs).mean() for coords, cell_probs in coords_to_probs.items()}
+
+ def convert_logits_to_predictions(self, data, logits, logits_agg=None, cell_classification_threshold=0.5):
+ """
+ Converts logits of [`TapasForQuestionAnswering`] to actual predicted answer coordinates and optional
+ aggregation indices.
+
+ The original implementation, on which this function is based, can be found
+ [here](https://github.com/google-research/tapas/blob/4908213eb4df7aa988573350278b44c4dbe3f71b/tapas/experiments/prediction_utils.py#L288).
+
+ Args:
+ data (`dict`):
+ Dictionary mapping features to actual values. Should be created using [`TapasTokenizer`].
+ logits (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Tensor containing the logits at the token level.
+ logits_agg (`torch.Tensor` or `tf.Tensor` of shape `(batch_size, num_aggregation_labels)`, *optional*):
+ Tensor containing the aggregation logits.
+ cell_classification_threshold (`float`, *optional*, defaults to 0.5):
+ Threshold to be used for cell selection. All table cells for which their probability is larger than
+ this threshold will be selected.
+
+ Returns:
+ `tuple` comprising various elements depending on the inputs:
+
+ - predicted_answer_coordinates (`List[List[tuple]]` of length `batch_size`): Predicted answer coordinates
+ as a list of lists of tuples. Each element in the list contains the predicted answer coordinates of a
+ single example in the batch, as a list of tuples. Each tuple is a cell, i.e. (row index, column index).
+ - predicted_aggregation_indices (`List[int]` of length `batch_size`, *optional*, returned when
+ `logits_agg` is provided): Predicted aggregation operator indices of the aggregation head.
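+
+ Example (an illustrative sketch only; the checkpoint name and table contents are placeholders):
+
+ ```python
+ >>> import pandas as pd
+ >>> from transformers import TapasForQuestionAnswering, TapasTokenizer
+
+ >>> tokenizer = TapasTokenizer.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq")
+ >>> data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio"], "Number of movies": ["87", "53"]}
+ >>> table = pd.DataFrame.from_dict(data)
+ >>> inputs = tokenizer(table=table, queries=["How many movies has Brad Pitt played in?"], return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> coords, agg_indices = tokenizer.convert_logits_to_predictions(
+ ...     inputs, outputs.logits.detach(), outputs.logits_aggregation.detach()
+ ... )
+ ```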
+ """
+ # converting to numpy arrays to work with PT/TF
+ logits = logits.numpy()
+ if logits_agg is not None:
+ logits_agg = logits_agg.numpy()
+ data = {key: value.numpy() for key, value in data.items() if key != "training"}
+ # input data is of type float32
+ # np.log(np.finfo(np.float32).max) = 88.72284
+ # Any value over 88.72284 will overflow when passed through the exponential, sending a warning
+ # We disable this warning by truncating the logits.
+ logits[logits < -88.7] = -88.7
+
+ # Compute probabilities from token logits
+ probabilities = 1 / (1 + np.exp(-logits)) * data["attention_mask"]
+ token_types = [
+ "segment_ids",
+ "column_ids",
+ "row_ids",
+ "prev_labels",
+ "column_ranks",
+ "inv_column_ranks",
+ "numeric_relations",
+ ]
+
+ # collect input_ids, segment ids, row ids and column ids of batch. Shape (batch_size, seq_len)
+ input_ids = data["input_ids"]
+ segment_ids = data["token_type_ids"][:, :, token_types.index("segment_ids")]
+ row_ids = data["token_type_ids"][:, :, token_types.index("row_ids")]
+ column_ids = data["token_type_ids"][:, :, token_types.index("column_ids")]
+
+ # next, get answer coordinates for every example in the batch
+ num_batch = input_ids.shape[0]
+ predicted_answer_coordinates = []
+ for i in range(num_batch):
+ probabilities_example = probabilities[i].tolist()
+ segment_ids_example = segment_ids[i]
+ row_ids_example = row_ids[i]
+ column_ids_example = column_ids[i]
+
+ max_width = column_ids_example.max()
+ max_height = row_ids_example.max()
+
+ if max_width == 0 and max_height == 0:
+ continue
+
+ cell_coords_to_prob = self._get_mean_cell_probs(
+ probabilities_example,
+ segment_ids_example.tolist(),
+ row_ids_example.tolist(),
+ column_ids_example.tolist(),
+ )
+
+ # Select the answers above the classification threshold.
+ answer_coordinates = []
+ for col in range(max_width):
+ for row in range(max_height):
+ cell_prob = cell_coords_to_prob.get((col, row), None)
+ if cell_prob is not None:
+ if cell_prob > cell_classification_threshold:
+ answer_coordinates.append((row, col))
+ answer_coordinates = sorted(answer_coordinates)
+ predicted_answer_coordinates.append(answer_coordinates)
+
+ output = (predicted_answer_coordinates,)
+
+ if logits_agg is not None:
+ predicted_aggregation_indices = logits_agg.argmax(axis=-1)
+ output = (predicted_answer_coordinates, predicted_aggregation_indices.tolist())
+
+ return output
+
+ # End of everything related to converting logits to predictions
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*):
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` wil return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
+
+
+# Below: utilities for TAPAS tokenizer (independent from PyTorch/Tensorflow).
+# This includes functions to parse numeric values (dates and numbers) from both the table and questions in order
+# to create the column_ranks, inv_column_ranks, numeric_values, numeric values_scale and numeric_relations in
+# prepare_for_model of TapasTokenizer.
+ # These are meant to be used in an academic setup; for production use cases, Gold mine or Aqua should be used.
+
+
+# taken from constants.py of the original implementation
+# URL: https://github.com/google-research/tapas/blob/master/tapas/utils/constants.py
+class Relation(enum.Enum):
+ HEADER_TO_CELL = 1 # Connects header to cell.
+ CELL_TO_HEADER = 2 # Connects cell to header.
+ QUERY_TO_HEADER = 3 # Connects query to headers.
+ QUERY_TO_CELL = 4 # Connects query to cells.
+ ROW_TO_CELL = 5 # Connects row to cells.
+ CELL_TO_ROW = 6 # Connects cells to row.
+ EQ = 7 # Annotation value is same as cell value
+ LT = 8 # Annotation value is less than cell value
+ GT = 9 # Annotation value is greater than cell value
+
+
+@dataclass
+class Date:
+ year: Optional[int] = None
+ month: Optional[int] = None
+ day: Optional[int] = None
+
+
+@dataclass
+class NumericValue:
+ float_value: Optional[float] = None
+ date: Optional[Date] = None
+
+
+@dataclass
+class NumericValueSpan:
+ begin_index: int = None
+ end_index: int = None
+ values: List[NumericValue] = None
+
+
+@dataclass
+class Cell:
+ text: str
+ numeric_value: Optional[NumericValue] = None
+
+
+@dataclass
+class Question:
+ original_text: str # The original raw question string.
+ text: str # The question string after normalization.
+ numeric_spans: Optional[List[NumericValueSpan]] = None
+
+
+# Below: all functions from number_utils.py as well as 2 functions (namely get_all_spans and normalize_for_match)
+# from text_utils.py of the original implementation. URL's:
+# - https://github.com/google-research/tapas/blob/master/tapas/utils/number_utils.py
+# - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py
+
+
+# Constants for parsing date expressions.
+# Masks that specify (by a bool) which of (year, month, day) will be populated.
+_DateMask = collections.namedtuple("_DateMask", ["year", "month", "day"])
+
+_YEAR = _DateMask(True, False, False)
+_YEAR_MONTH = _DateMask(True, True, False)
+_YEAR_MONTH_DAY = _DateMask(True, True, True)
+_MONTH = _DateMask(False, True, False)
+_MONTH_DAY = _DateMask(False, True, True)
+
+# Pairs of patterns to pass to 'datetime.strptime' and masks specifying which
+# fields will be set by the corresponding pattern.
+_DATE_PATTERNS = (
+ ("%B", _MONTH),
+ ("%Y", _YEAR),
+ ("%Ys", _YEAR),
+ ("%b %Y", _YEAR_MONTH),
+ ("%B %Y", _YEAR_MONTH),
+ ("%B %d", _MONTH_DAY),
+ ("%b %d", _MONTH_DAY),
+ ("%d %b", _MONTH_DAY),
+ ("%d %B", _MONTH_DAY),
+ ("%B %d, %Y", _YEAR_MONTH_DAY),
+ ("%d %B %Y", _YEAR_MONTH_DAY),
+ ("%m-%d-%Y", _YEAR_MONTH_DAY),
+ ("%Y-%m-%d", _YEAR_MONTH_DAY),
+ ("%Y-%m", _YEAR_MONTH),
+ ("%B %Y", _YEAR_MONTH),
+ ("%d %b %Y", _YEAR_MONTH_DAY),
+ ("%Y-%m-%d", _YEAR_MONTH_DAY),
+ ("%b %d, %Y", _YEAR_MONTH_DAY),
+ ("%d.%m.%Y", _YEAR_MONTH_DAY),
+ ("%A, %b %d", _MONTH_DAY),
+ ("%A, %B %d", _MONTH_DAY),
+)
+
+# This mapping is used to convert date patterns to regex patterns.
+_FIELD_TO_REGEX = (
+ ("%A", r"\w+"), # Weekday as locale’s full name.
+ ("%B", r"\w+"), # Month as locale’s full name.
+ ("%Y", r"\d{4}"), # Year with century as a decimal number.
+ ("%b", r"\w{3}"), # Month as locale’s abbreviated name.
+ ("%d", r"\d{1,2}"), # Day of the month as a zero-padded decimal number.
+ ("%m", r"\d{1,2}"), # Month as a zero-padded decimal number.
+)
+
+
+def _process_date_pattern(dp):
+ """Compute a regex for each date pattern to use as a prefilter."""
+ pattern, mask = dp
+ regex = pattern
+ regex = regex.replace(".", re.escape("."))
+ regex = regex.replace("-", re.escape("-"))
+ regex = regex.replace(" ", r"\s+")
+ for field, field_regex in _FIELD_TO_REGEX:
+ regex = regex.replace(field, field_regex)
+ # Make sure we didn't miss any of the fields.
+ assert "%" not in regex, regex
+ return pattern, mask, re.compile("^" + regex + "$")
+
+
+def _process_date_patterns():
+ return tuple(_process_date_pattern(dp) for dp in _DATE_PATTERNS)
+
+
+_PROCESSED_DATE_PATTERNS = _process_date_patterns()
+
+_MAX_DATE_NGRAM_SIZE = 5
+
+# Following DynSp:
+# https://github.com/Microsoft/DynSP/blob/master/util.py#L414.
+_NUMBER_WORDS = [
+ "zero",
+ "one",
+ "two",
+ "three",
+ "four",
+ "five",
+ "six",
+ "seven",
+ "eight",
+ "nine",
+ "ten",
+ "eleven",
+ "twelve",
+]
+
+_ORDINAL_WORDS = [
+ "zeroth",
+ "first",
+ "second",
+ "third",
+ "fourth",
+ "fith",
+ "sixth",
+ "seventh",
+ "eighth",
+ "ninth",
+ "tenth",
+ "eleventh",
+ "twelfth",
+]
+
+_ORDINAL_SUFFIXES = ["st", "nd", "rd", "th"]
+
+_NUMBER_PATTERN = re.compile(r"((^|\s)[+-])?((\.\d+)|(\d+(,\d\d\d)*(\.\d*)?))")
+
+# Following DynSp:
+# https://github.com/Microsoft/DynSP/blob/master/util.py#L293.
+_MIN_YEAR = 1700
+_MAX_YEAR = 2016
+
+_INF = float("INF")
+
+
+def _get_numeric_value_from_date(date, mask):
+ """Converts date (datetime Python object) to a NumericValue object with a Date object value."""
+ if date.year < _MIN_YEAR or date.year > _MAX_YEAR:
+ raise ValueError(f"Invalid year: {date.year}")
+
+ new_date = Date()
+ if mask.year:
+ new_date.year = date.year
+ if mask.month:
+ new_date.month = date.month
+ if mask.day:
+ new_date.day = date.day
+ return NumericValue(date=new_date)
+
+
+def _get_span_length_key(span):
+ """Sorts span by decreasing length first and increasing first index second."""
+ return span[1] - span[0], -span[0]
+
+
+def _get_numeric_value_from_float(value):
+ """Converts float (Python) to a NumericValue object with a float value."""
+ return NumericValue(float_value=value)
+
+
+# Doesn't parse ordinal expressions such as '18th of february 1655'.
+def _parse_date(text):
+ """Attempts to format a text as a standard date string (yyyy-mm-dd)."""
+ text = re.sub(r"Sept\b", "Sep", text)
+ for in_pattern, mask, regex in _PROCESSED_DATE_PATTERNS:
+ if not regex.match(text):
+ continue
+ try:
+ date = datetime.datetime.strptime(text, in_pattern).date()
+ except ValueError:
+ continue
+ try:
+ return _get_numeric_value_from_date(date, mask)
+ except ValueError:
+ continue
+ return None
+
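+# Illustrative examples (not part of the original file) of _parse_date behaviour:
+#   _parse_date("august 2007")  # -> NumericValue(date=Date(year=2007, month=8))
+#   _parse_date("17 june")      # -> NumericValue(date=Date(month=6, day=17))
+#   _parse_date("2525")         # -> None (year outside the [1700, 2016] range)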
+
+def _parse_number(text):
+ """Parses simple cardinal and ordinals numbers."""
+ for suffix in _ORDINAL_SUFFIXES:
+ if text.endswith(suffix):
+ text = text[: -len(suffix)]
+ break
+ text = text.replace(",", "")
+ try:
+ value = float(text)
+ except ValueError:
+ return None
+ if math.isnan(value):
+ return None
+ if value == _INF:
+ return None
+ return value
+
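+# Illustrative examples (not part of the original file) of _parse_number behaviour:
+#   _parse_number("7th")      # -> 7.0    (ordinal suffix stripped)
+#   _parse_number("1,000.5")  # -> 1000.5 (thousands separators removed)
+#   _parse_number("ten")      # -> None   (number words are handled separately in parse_text)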
+
+def get_all_spans(text, max_ngram_length):
+ """
+ Split a text into all possible ngrams up to 'max_ngram_length'. Split points are white space and punctuation.
+
+ Args:
+ text: Text to split.
+ max_ngram_length: maximal ngram length.
+ Yields:
+ Spans, tuples of begin-end index.
+ """
+ start_indexes = []
+ for index, char in enumerate(text):
+ if not char.isalnum():
+ continue
+ if index == 0 or not text[index - 1].isalnum():
+ start_indexes.append(index)
+ if index + 1 == len(text) or not text[index + 1].isalnum():
+ for start_index in start_indexes[-max_ngram_length:]:
+ yield start_index, index + 1
+
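+# Illustrative example (not part of the original file): get_all_spans("born in 1995", max_ngram_length=2)
+# yields the spans (0, 4), (0, 7), (5, 7), (5, 12) and (8, 12), i.e. the ngrams
+# "born", "born in", "in", "in 1995" and "1995".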
+
+def normalize_for_match(text):
+ return " ".join(text.lower().split())
+
+
+def format_text(text):
+ """Lowercases and strips punctuation."""
+ text = text.lower().strip()
+ if text == "n/a" or text == "?" or text == "nan":
+ text = EMPTY_TEXT
+
+ text = re.sub(r"[^\w\d]+", " ", text).replace("_", " ")
+ text = " ".join(text.split())
+ text = text.strip()
+ if text:
+ return text
+ return EMPTY_TEXT
+
+
+def parse_text(text):
+ """
+ Extracts longest number and date spans.
+
+ Args:
+ text: text to annotate
+
+ Returns:
+ List of longest numeric value spans.
+ """
+ span_dict = collections.defaultdict(list)
+ for match in _NUMBER_PATTERN.finditer(text):
+ span_text = text[match.start() : match.end()]
+ number = _parse_number(span_text)
+ if number is not None:
+ span_dict[match.span()].append(_get_numeric_value_from_float(number))
+
+ for begin_index, end_index in get_all_spans(text, max_ngram_length=1):
+ if (begin_index, end_index) in span_dict:
+ continue
+ span_text = text[begin_index:end_index]
+
+ number = _parse_number(span_text)
+ if number is not None:
+ span_dict[begin_index, end_index].append(_get_numeric_value_from_float(number))
+ for number, word in enumerate(_NUMBER_WORDS):
+ if span_text == word:
+ span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))
+ break
+ for number, word in enumerate(_ORDINAL_WORDS):
+ if span_text == word:
+ span_dict[begin_index, end_index].append(_get_numeric_value_from_float(float(number)))
+ break
+
+ for begin_index, end_index in get_all_spans(text, max_ngram_length=_MAX_DATE_NGRAM_SIZE):
+ span_text = text[begin_index:end_index]
+ date = _parse_date(span_text)
+ if date is not None:
+ span_dict[begin_index, end_index].append(date)
+
+ spans = sorted(span_dict.items(), key=lambda span_value: _get_span_length_key(span_value[0]), reverse=True)
+ selected_spans = []
+ for span, value in spans:
+ for selected_span, _ in selected_spans:
+ if selected_span[0] <= span[0] and span[1] <= selected_span[1]:
+ break
+ else:
+ selected_spans.append((span, value))
+
+ selected_spans.sort(key=lambda span_value: span_value[0][0])
+
+ numeric_value_spans = []
+ for span, values in selected_spans:
+ numeric_value_spans.append(NumericValueSpan(begin_index=span[0], end_index=span[1], values=values))
+ return numeric_value_spans
+
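+# Illustrative example (not part of the original file): parse_text("july 2007") returns a
+# single NumericValueSpan(begin_index=0, end_index=9) whose values include
+# NumericValue(date=Date(year=2007, month=7)); the shorter overlapping spans "july" and
+# "2007" are dropped because they are contained in the longer span.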
+
+# Below: all functions from number_annotation_utils.py and 2 functions (namely filter_invalid_unicode
+# and filter_invalid_unicode_from_table) from text_utils.py of the original implementation. URL's:
+# - https://github.com/google-research/tapas/blob/master/tapas/utils/number_annotation_utils.py
+# - https://github.com/google-research/tapas/blob/master/tapas/utils/text_utils.py
+
+
+_PrimitiveNumericValue = Union[float, Tuple[Optional[float], Optional[float], Optional[float]]]
+_SortKeyFn = Callable[[NumericValue], Tuple[float, Ellipsis]]
+
+_DATE_TUPLE_SIZE = 3
+
+EMPTY_TEXT = "EMPTY"
+
+NUMBER_TYPE = "number"
+DATE_TYPE = "date"
+
+
+def _get_value_type(numeric_value):
+ if numeric_value.float_value is not None:
+ return NUMBER_TYPE
+ elif numeric_value.date is not None:
+ return DATE_TYPE
+ raise ValueError(f"Unknown type: {numeric_value}")
+
+
+def _get_value_as_primitive_value(numeric_value):
+ """Maps a NumericValue proto to a float or tuple of float."""
+ if numeric_value.float_value is not None:
+ return numeric_value.float_value
+ if numeric_value.date is not None:
+ date = numeric_value.date
+ value_tuple = [None, None, None]
+ # All date fields are cast to float to produce a simple primitive value.
+ if date.year is not None:
+ value_tuple[0] = float(date.year)
+ if date.month is not None:
+ value_tuple[1] = float(date.month)
+ if date.day is not None:
+ value_tuple[2] = float(date.day)
+ return tuple(value_tuple)
+ raise ValueError(f"Unknown type: {numeric_value}")
+
+
+def _get_all_types(numeric_values):
+ return {_get_value_type(value) for value in numeric_values}
+
+
+def get_numeric_sort_key_fn(numeric_values):
+ """
+ Creates a function that can be used as a sort key or to compare the values. Maps to primitive types and finds the
+ biggest common subset. Consider the values "05/05/2010" and "August 2007", with corresponding primitive values
+ (2010., 5., 5.) and (2007., 8., None). These values can be compared by year and month, so we map to the sequences
+ (2010., 5.) and (2007., 8.). If we added a third value "2006" with primitive value (2006., None, None), we could
+ only compare by year, so we would map to (2010.,), (2007.,) and (2006.,).
+
+ Args:
+ numeric_values: Values to compare
+
+ Returns:
+ A function that can be used as a sort key function (mapping numeric values to a comparable tuple)
+
+ Raises:
+ ValueError if values don't have a common type or are not comparable.
+ """
+ value_types = _get_all_types(numeric_values)
+ if len(value_types) != 1:
+ raise ValueError(f"No common value type in {numeric_values}")
+
+ value_type = next(iter(value_types))
+ if value_type == NUMBER_TYPE:
+ # Primitive values are simple floats, nothing to do here.
+ return _get_value_as_primitive_value
+
+ # The type can only be Date at this point which means the primitive type
+ # is a float triple.
+ valid_indexes = set(range(_DATE_TUPLE_SIZE))
+
+ for numeric_value in numeric_values:
+ value = _get_value_as_primitive_value(numeric_value)
+ assert isinstance(value, tuple)
+ for tuple_index, inner_value in enumerate(value):
+ if inner_value is None:
+ valid_indexes.discard(tuple_index)
+
+ if not valid_indexes:
+ raise ValueError(f"No common value in {numeric_values}")
+
+ def _sort_key_fn(numeric_value):
+ value = _get_value_as_primitive_value(numeric_value)
+ return tuple(value[index] for index in valid_indexes)
+
+ return _sort_key_fn
+
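+# Illustrative example (not part of the original file): for dates that only share the
+# year and month fields, the generated key ignores the missing day field:
+#   values = [NumericValue(date=Date(year=2010, month=5, day=5)),
+#             NumericValue(date=Date(year=2007, month=8))]
+#   key_fn = get_numeric_sort_key_fn(values)
+#   [key_fn(v) for v in values]  # -> [(2010.0, 5.0), (2007.0, 8.0)]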
+
+def _consolidate_numeric_values(row_index_to_values, min_consolidation_fraction, debug_info):
+ """
+ Finds the most common numeric values in a column and returns them
+
+ Args:
+ row_index_to_values:
+ For each row index all the values in that cell.
+ min_consolidation_fraction:
+ Fraction of cells that need to have consolidated value.
+ debug_info:
+ Additional information only used for logging
+
+ Returns:
+ For each row index the first value that matches the most common value. Rows that don't have a matching value
+ are dropped. Empty list if values can't be consolidated.
+ """
+ type_counts = collections.Counter()
+ for numeric_values in row_index_to_values.values():
+ type_counts.update(_get_all_types(numeric_values))
+ if not type_counts:
+ return {}
+ max_count = max(type_counts.values())
+ if max_count < len(row_index_to_values) * min_consolidation_fraction:
+ # logging.log_every_n(logging.INFO, f'Can\'t consolidate types: {debug_info} {row_index_to_values} {max_count}', 100)
+ return {}
+
+ valid_types = set()
+ for value_type, count in type_counts.items():
+ if count == max_count:
+ valid_types.add(value_type)
+ if len(valid_types) > 1:
+ assert DATE_TYPE in valid_types
+ max_type = DATE_TYPE
+ else:
+ max_type = next(iter(valid_types))
+
+ new_row_index_to_value = {}
+ for index, values in row_index_to_values.items():
+ # Extract the first matching value.
+ for value in values:
+ if _get_value_type(value) == max_type:
+ new_row_index_to_value[index] = value
+ break
+
+ return new_row_index_to_value
+
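+# Illustrative example (not part of the original file): with min_consolidation_fraction=0.7,
+# a column parsed as
+#   {0: [NumericValue(float_value=2010.0), NumericValue(date=Date(year=2010))],
+#    1: [NumericValue(date=Date(year=2007))]}
+# consolidates to the date type (the most common one), keeping the first date value per
+# row: {0: NumericValue(date=Date(year=2010)), 1: NumericValue(date=Date(year=2007))}.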
+
+def _get_numeric_values(text):
+ """Parses text and returns numeric values."""
+ numeric_spans = parse_text(text)
+ return itertools.chain(*(span.values for span in numeric_spans))
+
+
+def _get_column_values(table, col_index):
+ """
+ Parses text in column and returns a dict mapping row_index to values. This is the _get_column_values function from
+ number_annotation_utils.py of the original implementation
+
+ Args:
+ table: Pandas dataframe
+ col_index: integer, indicating the index of the column to get the numeric values of
+ """
+ index_to_values = {}
+ for row_index, row in table.iterrows():
+ text = normalize_for_match(row[col_index].text)
+ index_to_values[row_index] = list(_get_numeric_values(text))
+ return index_to_values
+
+
+def get_numeric_relation(value, other_value, sort_key_fn):
+ """Compares two values and returns their relation or None."""
+ value = sort_key_fn(value)
+ other_value = sort_key_fn(other_value)
+ if value == other_value:
+ return Relation.EQ
+ if value < other_value:
+ return Relation.LT
+ if value > other_value:
+ return Relation.GT
+ return None
+
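+# Illustrative example (not part of the original file):
+#   values = [NumericValue(float_value=3.0), NumericValue(float_value=5.0)]
+#   key_fn = get_numeric_sort_key_fn(values)
+#   get_numeric_relation(values[0], values[1], key_fn)  # -> Relation.LT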
+
+def add_numeric_values_to_question(question):
+ """Adds numeric value spans to a question."""
+ original_text = question
+ question = normalize_for_match(question)
+ numeric_spans = parse_text(question)
+ return Question(original_text=original_text, text=question, numeric_spans=numeric_spans)
+
+
+def filter_invalid_unicode(text):
+ """Return an empty string and True if 'text' is in invalid unicode."""
+ return ("", True) if isinstance(text, bytes) else (text, False)
+
+
+def filter_invalid_unicode_from_table(table):
+ """
+ Removes invalid unicode from a table. Checks whether a table cell text contains an invalid unicode encoding. If so,
+ resets the table cell text to an empty string and logs a warning for each invalid cell.
+
+ Args:
+ table: table to clean.
+ """
+ # to do: add table id support
+ if not hasattr(table, "table_id"):
+ table.table_id = 0
+
+ for row_index, row in table.iterrows():
+ for col_index, cell in enumerate(row):
+ cell, is_invalid = filter_invalid_unicode(cell)
+ if is_invalid:
+ logging.warning(
+ f"Scrub an invalid table body @ table_id: {table.table_id}, row_index: {row_index}, "
+ f"col_index: {col_index}",
+ )
+ for col_index, column in enumerate(table.columns):
+ column, is_invalid = filter_invalid_unicode(column)
+ if is_invalid:
+ logging.warning(f"Scrub an invalid table header @ table_id: {table.table_id}, col_index: {col_index}")
+
+
+def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=None):
+ """
+ Parses text in the table column-wise and adds the consolidated values. Consolidation refers to finding values with a
+ common type (date or number).
+
+ Args:
+ table:
+ Table to annotate.
+ min_consolidation_fraction:
+ Fraction of cells in a column that need to have consolidated value.
+ debug_info:
+ Additional information used for logging.
+ """
+ table = table.copy()
+ # First, filter table on invalid unicode
+ filter_invalid_unicode_from_table(table)
+
+ # Second, replace cell values by Cell objects
+ for row_index, row in table.iterrows():
+ for col_index, cell in enumerate(row):
+ table.iloc[row_index, col_index] = Cell(text=cell)
+
+ # Third, add numeric_value attributes to these Cell objects
+ for col_index, column in enumerate(table.columns):
+ column_values = _consolidate_numeric_values(
+ _get_column_values(table, col_index),
+ min_consolidation_fraction=min_consolidation_fraction,
+ debug_info=(debug_info, column),
+ )
+
+ for row_index, numeric_value in column_values.items():
+ table.iloc[row_index, col_index].numeric_value = numeric_value
+
+ return table
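+
+
+# Illustrative usage (not part of the original file), assuming a plain pandas DataFrame of
+# string cells as passed to TapasTokenizer:
+#   import pandas as pd
+#   table = pd.DataFrame({"Year": ["2007", "2010"], "Name": ["foo", "bar"]})
+#   table = add_numeric_table_values(table)
+#   table.iloc[0, 0]  # -> Cell(text="2007", numeric_value=NumericValue(date=Date(year=2007)))
+# A four-digit year such as "2007" parses as both a number and a date, and the column-wise
+# consolidation prefers the date interpretation.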
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2853605ba2d275073624c1b189ee5247eba695ca
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__init__.py
@@ -0,0 +1,54 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {"configuration_vipllava": ["VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP", "VipLlavaConfig"]}
+
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_vipllava"] = [
+ "VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "VipLlavaForConditionalGeneration",
+ "VipLlavaPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_vipllava import VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP, VipLlavaConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_vipllava import (
+ VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST,
+ VipLlavaForConditionalGeneration,
+ VipLlavaPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d819c677820c381149a4a9c3e0e347d81ce32b6b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/configuration_vipllava.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/configuration_vipllava.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4826870640adb0ea9c6e8fd4d74d523b02bebb52
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/configuration_vipllava.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/convert_vipllava_weights_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/convert_vipllava_weights_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5888618c31f68289a10ca7916a5c0aeca0e2f338
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/convert_vipllava_weights_to_hf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/modeling_vipllava.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/modeling_vipllava.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..755f9c637167ec6aeae4f96d3a3bf41bbde92a03
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vipllava/__pycache__/modeling_vipllava.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/configuration_vipllava.py b/venv/lib/python3.10/site-packages/transformers/models/vipllava/configuration_vipllava.py
new file mode 100644
index 0000000000000000000000000000000000000000..d57f4179492ea20996ec5d340f53b870273a14c2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vipllava/configuration_vipllava.py
@@ -0,0 +1,144 @@
+# coding=utf-8
+# Copyright 2023 Microsoft Research & University of Wisconsin-Madison and the HuggingFace Inc. team. All rights reserved.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" VipLlava model configuration"""
+
+import warnings
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+from ..auto import CONFIG_MAPPING
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import VIPLLAVA_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class VipLlavaConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`VipLlavaForConditionalGeneration`]. It is used to instantiate a
+ VipLlava model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the VipLlava-9B.
+
+ e.g. [ybelkada/vip-llava-7b-hf](https://huggingface.co/ybelkada/vip-llava-7b-hf)
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vision_config (`VipLlavaVisionConfig`, *optional*):
+ Custom vision config or dict
+ text_config (`Union[AutoConfig, dict]`, *optional*):
+ The config object of the text backbone. Can be any of `LlamaConfig` or `MistralConfig`.
+ ignore_index (`int`, *optional*, defaults to -100):
+ The ignore index for the loss function.
+ image_token_index (`int`, *optional*, defaults to 32000):
+ The image token index to encode the image prompt.
+ projector_hidden_act (`str`, *optional*, defaults to `"gelu"`):
+ The activation function used by the multimodal projector.
+ projector_layernorm_eps (`float`, *optional*, defaults to 1e-05):
+ The layer norm epsilon of the projector layernorm
+ vision_feature_layers (`List[int]`, *optional*, defaults to `[-2, -5, -8, -11, 6]`):
+ The list of layers to select the vision features from.
+
+ Example:
+
+ ```python
+ >>> from transformers import VipLlavaForConditionalGeneration, VipLlavaConfig, CLIPVisionConfig, LlamaConfig
+
+ >>> # Initializing a CLIP-vision config
+ >>> vision_config = CLIPVisionConfig()
+
+ >>> # Initializing a Llama config
+ >>> text_config = LlamaConfig()
+
+ >>> # Initializing a VipLlava vipllava-7b style configuration
+ >>> configuration = VipLlavaConfig(vision_config, text_config)
+
+ >>> # Initializing a model from the vipllava-7b style configuration
+ >>> model = VipLlavaForConditionalGeneration(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "vipllava"
+ is_composition = False
+
+ def __init__(
+ self,
+ vision_config=None,
+ text_config=None,
+ ignore_index=-100,
+ image_token_index=32000,
+ projector_hidden_act="gelu",
+ projector_layernorm_eps=1e-5,
+ vision_feature_layers=[-2, -5, -8, -11, 6],
+ **kwargs,
+ ):
+ self.ignore_index = ignore_index
+ self.image_token_index = image_token_index
+ self.projector_hidden_act = projector_hidden_act
+ self.projector_layernorm_eps = projector_layernorm_eps
+ self.vision_feature_layers = vision_feature_layers
+
+ if "vocab_size" in kwargs:
+ warnings.warn(
+ "The `vocab_size` argument is deprecated and will be removed in v4.42, since it can be inferred from the `text_config`. Passing this argument has no effect",
+ FutureWarning,
+ )
+
+ self.vision_config = vision_config
+
+ if isinstance(self.vision_config, dict):
+ vision_config["model_type"] = (
+ vision_config["model_type"] if "model_type" in vision_config else "clip_vision_model"
+ )
+ self.vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
+ elif vision_config is None:
+ self.vision_config = CONFIG_MAPPING["clip_vision_model"](
+ intermediate_size=4096,
+ hidden_size=1024,
+ patch_size=14,
+ image_size=336,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ vocab_size=32000,
+ projection_dim=768,
+ )
+
+ if isinstance(text_config, dict):
+ text_config["model_type"] = text_config["model_type"] if "model_type" in text_config else "llama"
+ text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
+ elif text_config is None:
+ text_config = CONFIG_MAPPING["llama"]()
+
+ self.text_config = text_config
+ self._vocab_size = self.text_config.vocab_size
+
+ super().__init__(**kwargs)
+
+ @property
+ def vocab_size(self):
+ warnings.warn(
+ "The `vocab_size` attribute is deprecated and will be removed in v4.42, Please use `text_config.vocab_size` instead.",
+ FutureWarning,
+ )
+ return self._vocab_size
+
+ def to_dict(self):
+ output = super().to_dict()
+ output.pop("_vocab_size", None)
+ return output
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/convert_vipllava_weights_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/vipllava/convert_vipllava_weights_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..2914cfdfcd4b421c5c020852364034037fcc9558
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vipllava/convert_vipllava_weights_to_hf.py
@@ -0,0 +1,132 @@
+# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import argparse
+
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import (
+ AddedToken,
+ AutoConfig,
+ AutoTokenizer,
+ CLIPImageProcessor,
+ LlavaProcessor,
+ VipLlavaConfig,
+ VipLlavaForConditionalGeneration,
+)
+
+
+KEYS_TO_MODIFY_MAPPING = {
+ "model.vision_tower.": "",
+ "model.mm_projector": "multi_modal_projector",
+ "model": "model.model",
+ "vision_model.model": "vision_model",
+ "lm_head": "language_model.lm_head",
+ "model.model": "language_model.model",
+ "multi_modal_projector.0": "multi_modal_projector.linear_1",
+ "multi_modal_projector.2": "multi_modal_projector.linear_2",
+ "final_linear.0": "linear_1",
+ "final_linear.2": "linear_2",
+ "multi_modal_projector.clip_layernorm": "multi_modal_projector.projector_layernorm",
+}
+
+
+# Copied from transformers.models.llava.convert_llava_weights_to_hf.convert_state_dict_to_hf
+def convert_state_dict_to_hf(state_dict):
+ new_state_dict = {}
+ for key, value in state_dict.items():
+ if key.endswith(".inv_freq"):
+ continue
+ for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
+ if key_to_modify in key:
+ key = key.replace(key_to_modify, new_key)
+ new_state_dict[key] = value
+ return new_state_dict
+
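+# Illustrative example (not part of the original file): keys are renamed by successive
+# substring replacement, e.g. for some tensor w,
+#   convert_state_dict_to_hf({"model.layers.0.self_attn.q_proj.weight": w})
+#   # -> {"language_model.model.layers.0.self_attn.q_proj.weight": w}
+# and rotary-embedding buffers ending in ".inv_freq" are dropped.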
+
+def convert_vipllava_llama_to_hf(text_model_id, vision_model_id, output_hub_path, old_state_dict_id):
+ torch.set_default_dtype(torch.float16)
+ text_config = AutoConfig.from_pretrained(text_model_id)
+
+ tokenizer = AutoTokenizer.from_pretrained(text_model_id)
+ tokenizer.add_tokens(AddedToken("<image>", special=True, normalized=False), special_tokens=True)
+ tokenizer.add_special_tokens({"pad_token": "<pad>"})
+
+ image_processor = CLIPImageProcessor.from_pretrained(vision_model_id)
+
+ processor = LlavaProcessor(tokenizer=tokenizer, image_processor=image_processor)
+
+ config = VipLlavaConfig(text_config=text_config)
+ config.pad_token_id = 32001
+
+ with torch.device("meta"):
+ model = VipLlavaForConditionalGeneration(config)
+
+ # Pad to 64 for performance reasons
+ pad_shape = 64
+
+ state_dict_path = hf_hub_download(old_state_dict_id, "model_state_dict_7b.bin")
+
+ state_dict = torch.load(state_dict_path, map_location="cpu")
+ state_dict = convert_state_dict_to_hf(state_dict)
+
+ model.load_state_dict(state_dict, strict=True, assign=True)
+
+ pre_expansion_embeddings = model.language_model.model.embed_tokens.weight.data
+ mu = torch.mean(pre_expansion_embeddings, dim=0).float()
+ n = pre_expansion_embeddings.size()[0]
+ sigma = ((pre_expansion_embeddings - mu).T @ (pre_expansion_embeddings - mu)) / n
+ dist = torch.distributions.multivariate_normal.MultivariateNormal(mu, covariance_matrix=1e-5 * sigma)
+
+ # We add an image token so we resize the model
+ model.resize_token_embeddings(config.text_config.vocab_size + 2, pad_shape)
+ model.language_model.model.embed_tokens.weight.data[32000:] = torch.stack(
+ tuple((dist.sample() for _ in range(model.language_model.model.embed_tokens.weight.data[32000:].shape[0]))),
+ dim=0,
+ )
+ model.language_model.lm_head.weight.data[32000:] = torch.stack(
+ tuple((dist.sample() for _ in range(model.language_model.lm_head.weight.data[32000:].shape[0]))),
+ dim=0,
+ )
+
+ model.push_to_hub(output_hub_path)
+ processor.push_to_hub(output_hub_path)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--text_model_id",
+ help="Hub location of the text model",
+ )
+ parser.add_argument(
+ "--vision_model_id",
+ help="Hub location of the vision model",
+ )
+ parser.add_argument(
+ "--output_hub_path",
+ help="Location on the hub of the converted model",
+ )
+ parser.add_argument(
+ "--old_state_dict_id",
+ help="Location on the hub of the raw state dict of the original model. The filename needs to be `model_state_dict.bin`",
+ )
+ args = parser.parse_args()
+ convert_vipllava_llama_to_hf(
+ args.text_model_id, args.vision_model_id, args.output_hub_path, args.old_state_dict_id
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vipllava/modeling_vipllava.py b/venv/lib/python3.10/site-packages/transformers/models/vipllava/modeling_vipllava.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b20353410c895d64a859e26f0cd838a45141c26
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/vipllava/modeling_vipllava.py
@@ -0,0 +1,566 @@
+# coding=utf-8
+# Copyright 2023 the HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch VipLlava model."""
+from dataclasses import dataclass
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ... import PreTrainedModel
+from ...activations import ACT2FN
+from ...cache_utils import Cache
+from ...modeling_outputs import ModelOutput
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ..auto import AutoModel, AutoModelForCausalLM
+from .configuration_vipllava import VipLlavaConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "VipLlavaConfig"
+
+
+from ..deprecated._archive_maps import VIPLLAVA_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+# Copied from transformers.models.idefics.modeling_idefics.IdeficsCausalLMOutputWithPast with Idefics->VipLlava
+class VipLlavaCausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for VipLlava causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
+ sequence_length, hidden_size)`.
+
+ image_hidden_states of the model produced by the vision encoder, and optionally by the perceiver
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+class VipLlavaMultiModalProjector(nn.Module):
+ def __init__(self, config: VipLlavaConfig):
+ super().__init__()
+ self.projector_layernorm = nn.LayerNorm(
+ len(config.vision_feature_layers) * config.vision_config.hidden_size, eps=config.projector_layernorm_eps
+ )
+
+ self.linear_1 = nn.Linear(
+ len(config.vision_feature_layers) * config.vision_config.hidden_size,
+ config.text_config.hidden_size,
+ bias=True,
+ )
+ self.act = ACT2FN[config.projector_hidden_act]
+ self.linear_2 = nn.Linear(config.text_config.hidden_size, config.text_config.hidden_size, bias=True)
+
+ def forward(self, hidden_states):
+ hidden_states = self.projector_layernorm(hidden_states)
+ hidden_states = self.linear_1(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.linear_2(hidden_states)
+ return hidden_states
+
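+# Illustrative shape walk-through (not part of the original file), assuming the default
+# VipLlavaConfig (5 vision_feature_layers, CLIP hidden_size 1024, Llama hidden_size 4096):
+#   input:  hidden_states of shape (batch, num_patches, 5 * 1024)
+#   LayerNorm(5120) -> Linear(5120, 4096) -> GELU -> Linear(4096, 4096)
+#   output: hidden_states of shape (batch, num_patches, 4096)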
+
+VIPLLAVA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`VipLlavaConfig`] or [`VipLlavaVisionConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare VipLlava Model outputting raw hidden-states without any specific head on top.",
+ VIPLLAVA_START_DOCSTRING,
+)
+# Copied from transformers.models.llava.modeling_llava.LlavaPreTrainedModel with Llava->VipLlava,llava->vipllava
+class VipLlavaPreTrainedModel(PreTrainedModel):
+ config_class = VipLlavaConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["VipLlavaVisionAttention"]
+ _skip_keys_device_placement = "past_key_values"
+ _supports_flash_attn_2 = True
+
+ def _init_weights(self, module):
+ # important: this ported version of VipLlava isn't meant for training from scratch - only
+ # inference and fine-tuning - so the proper init weights code has been removed - the original codebase
+ # https://github.com/haotian-liu/LLaVA/tree/main/vipllava should serve for that purpose
+ std = (
+ self.config.initializer_range
+ if hasattr(self.config, "initializer_range")
+ else self.config.text_config.initializer_range
+ )
+
+ if hasattr(module, "class_embedding"):
+ module.class_embedding.data.normal_(mean=0.0, std=std)
+
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def _supports_sdpa(self):
+ """
+ Retrieve language_model's attribute to check whether the model supports
+ SDPA or not.
+ """
+ return self.language_model._supports_sdpa
+
+
+VIPLLAVA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, image_size, image_size)`):
+ The tensors corresponding to the input images. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details ([`LlavaProcessor`] uses
+ [`CLIPImageProcessor`] for processing images).
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """The VIPLLAVA model which consists of a vision backbone and a language model.""",
+ VIPLLAVA_START_DOCSTRING,
+)
+# Copied from transformers.models.llava.modeling_llava.LlavaForConditionalGeneration with LLAVA->VIPLLAVA,Llava->VipLlava
+class VipLlavaForConditionalGeneration(VipLlavaPreTrainedModel):
+ def __init__(self, config: VipLlavaConfig):
+ super().__init__(config)
+ self.vision_tower = AutoModel.from_config(config.vision_config)
+
+ self.multi_modal_projector = VipLlavaMultiModalProjector(config)
+ self.vocab_size = config.text_config.vocab_size
+ self.language_model = AutoModelForCausalLM.from_config(
+ config.text_config, attn_implementation=config._attn_implementation
+ )
+ self.pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else -1
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.language_model.get_input_embeddings()
+
+ def set_input_embeddings(self, value):
+ self.language_model.set_input_embeddings(value)
+
+ def get_output_embeddings(self):
+ return self.language_model.get_output_embeddings()
+
+ def set_output_embeddings(self, new_embeddings):
+ self.language_model.set_output_embeddings(new_embeddings)
+
+ def set_decoder(self, decoder):
+ self.language_model.set_decoder(decoder)
+
+ def get_decoder(self):
+ return self.language_model.get_decoder()
+
+ def tie_weights(self):
+ return self.language_model.tie_weights()
+
+ def resize_token_embeddings(self, new_num_tokens: Optional[int] = None, pad_to_multiple_of=None) -> nn.Embedding:
+ model_embeds = self.language_model.resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ # update vocab size
+ self.config.text_config.vocab_size = model_embeds.num_embeddings
+ self.vocab_size = model_embeds.num_embeddings
+ return model_embeds
+
+ def _merge_input_ids_with_image_features(self, image_features, inputs_embeds, input_ids, attention_mask, labels):
+ num_images, num_image_patches, embed_dim = image_features.shape
+ batch_size, sequence_length = input_ids.shape
+ left_padding = not torch.sum(input_ids[:, -1] == torch.tensor(self.pad_token_id))
+ # 1. Create a mask to know where special image tokens are
+ special_image_token_mask = input_ids == self.config.image_token_index
+ num_special_image_tokens = torch.sum(special_image_token_mask, dim=-1)
+ # Compute the maximum embed dimension
+ max_embed_dim = (num_special_image_tokens.max() * (num_image_patches - 1)) + sequence_length
+ batch_indices, non_image_indices = torch.where(input_ids != self.config.image_token_index)
+
+ # 2. Compute the positions where text should be written
+ # Calculate new positions for text tokens in merged image-text sequence.
+ # `special_image_token_mask` identifies image tokens. Each image token will be replaced by `nb_text_tokens_per_images - 1` text tokens.
+ # `torch.cumsum` computes how each image token shifts subsequent text token positions.
+ # - 1 to adjust for zero-based indexing, as `cumsum` inherently increases indices by one.
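+ # Illustrative example (not part of the original file): with a single image token at
+ # position 1 and num_image_patches = 3, special_image_token_mask = [0, 1, 0, 0] gives
+ # new_token_positions = [0, 3, 4, 5]; the image patches occupy positions 1..3 and every
+ # later text token shifts right by 2.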
+ new_token_positions = torch.cumsum((special_image_token_mask * (num_image_patches - 1) + 1), -1) - 1
+ nb_image_pad = max_embed_dim - 1 - new_token_positions[:, -1]
+ if left_padding:
+ new_token_positions += nb_image_pad[:, None] # offset for left padding
+ text_to_overwrite = new_token_positions[batch_indices, non_image_indices]
+
+ # 3. Create the full embedding, already padded to the maximum position
+ final_embedding = torch.zeros(
+ batch_size, max_embed_dim, embed_dim, dtype=inputs_embeds.dtype, device=inputs_embeds.device
+ )
+ final_attention_mask = torch.zeros(
+ batch_size, max_embed_dim, dtype=attention_mask.dtype, device=inputs_embeds.device
+ )
+ if labels is not None:
+ final_labels = torch.full(
+ (batch_size, max_embed_dim), self.config.ignore_index, dtype=input_ids.dtype, device=input_ids.device
+ )
+ # In case the Vision model or the Language model has been offloaded to CPU, we need to manually
+ # set the corresponding tensors into their correct target device.
+ target_device = inputs_embeds.device
+ batch_indices, non_image_indices, text_to_overwrite = (
+ batch_indices.to(target_device),
+ non_image_indices.to(target_device),
+ text_to_overwrite.to(target_device),
+ )
+ attention_mask = attention_mask.to(target_device)
+
+ # 4. Fill the embeddings based on the mask. If we have ["hey", "<image>", "how", "are"]
+ # we need to index copy on [0, 577, 578, 579] for the text and [1:576] for the image features
+ final_embedding[batch_indices, text_to_overwrite] = inputs_embeds[batch_indices, non_image_indices]
+ final_attention_mask[batch_indices, text_to_overwrite] = attention_mask[batch_indices, non_image_indices]
+ if labels is not None:
+ final_labels[batch_indices, text_to_overwrite] = labels[batch_indices, non_image_indices]
+
+ # 5. Fill the embeddings corresponding to the images. Anything that is still zeros needs filling
+ image_to_overwrite = torch.all(final_embedding == 0, dim=-1)
+ image_to_overwrite &= image_to_overwrite.cumsum(-1) - 1 >= nb_image_pad[:, None].to(target_device)
+
+ if image_to_overwrite.sum() != image_features.shape[:-1].numel():
+ raise ValueError(
+ f"The input provided to the model are wrong. The number of image tokens is {torch.sum(special_image_token_mask)} while"
+ f" the number of image given to the model is {num_images}. This prevents correct indexing and breaks batch generation."
+ )
+
+ final_embedding[image_to_overwrite] = image_features.contiguous().reshape(-1, embed_dim).to(target_device)
+ final_attention_mask |= image_to_overwrite
+ position_ids = (final_attention_mask.cumsum(-1) - 1).masked_fill_((final_attention_mask == 0), 1)
+
+ # 6. Mask out the embedding at padding positions, as we later use the past_key_value value to determine the non-attended tokens.
+ batch_indices, pad_indices = torch.where(input_ids == self.pad_token_id)
+ indices_to_mask = new_token_positions[batch_indices, pad_indices]
+
+ final_embedding[batch_indices, indices_to_mask] = 0
+
+ if labels is None:
+ final_labels = None
+
+ return final_embedding, final_attention_mask, final_labels, position_ids
+
+ @add_start_docstrings_to_model_forward(VIPLLAVA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=VipLlavaCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ # Ignore copy
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ pixel_values: torch.FloatTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ vision_feature_layers: Optional[List[int]] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, VipLlavaCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import torch
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, VipLlavaForConditionalGeneration
+
+ >>> model = VipLlavaForConditionalGeneration.from_pretrained("llava-hf/vip-llava-7b-hf", device_map="auto", torch_dtype=torch.float16)
+ >>> processor = AutoProcessor.from_pretrained("llava-hf/vip-llava-7b-hf")
+
+ >>> prompt = "A chat between a curious human and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the human's questions.###Human: \n{}###Assistant:"
+ >>> question = "Can you please describe this image?"
+ >>> prompt = prompt.format(question)
+ >>> url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/compel-neg.png"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(text=prompt, images=image, return_tensors="pt").to(0, torch.float16)
+
+ >>> # Generate
+ >>> generate_ids = model.generate(**inputs, max_new_tokens=20)
+ >>> processor.decode(generate_ids[0][len(inputs["input_ids"][0]):], skip_special_tokens=True)
+ The image features a brown and white cat sitting on a green surface, with a red ball in its
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ vision_feature_layers = (
+ vision_feature_layers if vision_feature_layers is not None else self.config.vision_feature_layers
+ )
+
+ if inputs_embeds is None:
+ # 1. Extract the input embeddings
+ inputs_embeds = self.get_input_embeddings()(input_ids)
+
+ # 2. Merge text and images
+ if pixel_values is not None and input_ids.shape[1] != 1:
+ image_outputs = self.vision_tower(pixel_values, output_hidden_states=True)
+ # For VIP-llava, the image features are computed this way
+ # We select the features from index 1: for the layers -2, -5, -8, -11 and 6
+ image_features = [image_outputs.hidden_states[index][:, 1:] for index in vision_feature_layers]
+ image_features = torch.cat(image_features, dim=-1)
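+ # Illustrative shapes (not part of the original file), assuming the default 336x336
+ # CLIP backbone with patch size 14: each selected layer contributes (batch, 576, 1024)
+ # after dropping the CLS position, so the concatenated image_features tensor is
+ # (batch, 576, 5 * 1024) before projection.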
+
+ image_features = self.multi_modal_projector(image_features)
+ inputs_embeds, attention_mask, labels, position_ids = self._merge_input_ids_with_image_features(
+ image_features, inputs_embeds, input_ids, attention_mask, labels
+ )
+ if labels is None:
+ labels = torch.full_like(attention_mask, self.config.ignore_index).to(torch.long)
+ else:
+ # In case input_ids.shape[1] == 1 & pixel_values==None & past_key_values != None, we are in the case of
+ # generation with cache
+ if past_key_values is not None and pixel_values is not None and input_ids.shape[1] == 1:
+ # Retrieve the first layer to inspect the logits and mask out the hidden states
+ # that are set to 0
+ first_layer_past_key_value = past_key_values[0][0][:, :, :, 0]
+
+ # Sum all dimensions of head_dim (-1) to avoid random errors such as: https://github.com/huggingface/transformers/pull/28032#issuecomment-1863691941
+ batch_index, non_attended_tokens = torch.where(first_layer_past_key_value.float().sum(-2) == 0)
+
+ target_length = input_ids.shape[1]
+ past_length = first_layer_past_key_value.shape[-1]
+
+ extended_attention_mask = torch.ones(
+ (attention_mask.shape[0], past_length),
+ dtype=attention_mask.dtype,
+ device=attention_mask.device,
+ )
+
+ # Filter out only the tokens that can be un-attended, this can happen
+ # in the case one uses Llava + Fused modules where the cache on the
+ # first iteration is already big enough, or if one passes custom cache
+ valid_indices = non_attended_tokens < extended_attention_mask.size(-1)
+ new_batch_index = batch_index[valid_indices]
+ new_non_attended_tokens = non_attended_tokens[valid_indices]
+
+ # Zero-out the places where we don't need to attend
+ extended_attention_mask[new_batch_index, new_non_attended_tokens] = 0
+
+ attention_mask = torch.cat((extended_attention_mask, attention_mask[:, -target_length:]), dim=1)
+ position_ids = torch.sum(attention_mask, dim=1).unsqueeze(-1) - 1
+
+ outputs = self.language_model(
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = outputs[0]
+
+ loss = None
+ if labels is not None:
+ # Shift so that tokens < n predict n
+ if attention_mask is not None:
+ shift_attention_mask = attention_mask[..., 1:]
+ shift_logits = logits[..., :-1, :][shift_attention_mask.to(logits.device) != 0].contiguous()
+ shift_labels = labels[..., 1:][shift_attention_mask.to(labels.device) != 0].contiguous()
+ else:
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = nn.CrossEntropyLoss()
+ loss = loss_fct(
+ shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1).to(shift_logits.device)
+ )
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return VipLlavaCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, inputs_embeds=None, pixel_values=None, attention_mask=None, **kwargs
+ ):
+ if past_key_values is not None:
+ if isinstance(past_key_values, Cache):
+ cache_length = past_key_values.get_seq_length()
+ past_length = past_key_values.seen_tokens
+ else:
+ cache_length = past_length = past_key_values[0][0].shape[2]
+
+ # Keep only the unprocessed tokens:
+ # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where
+ # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as
+ # input)
+ if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]:
+ input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :]
+ # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard
+ # input_ids based on the past_length.
+ elif past_length < input_ids.shape[1]:
+ input_ids = input_ids[:, past_length:]
+ # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens.
+ elif self.config.image_token_index in input_ids:
+ input_ids = input_ids[:, input_ids.shape[1] - 1 :]
+ # If the cache has seen more tokens than it can hold, then the cache has a size limit. Let's discard the
+ # older attention values, as their corresponding values are not part of the input.
+ if cache_length < past_length and attention_mask is not None:
+ attention_mask = attention_mask[:, -(cache_length + input_ids.shape[1]) :]
+
+ position_ids = kwargs.get("position_ids", None)
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -input_ids.shape[1] :]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "position_ids": position_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "attention_mask": attention_mask,
+ "pixel_values": pixel_values,
+ }
+ )
+ return model_inputs
+
+ def _reorder_cache(self, *args, **kwargs):
+ return self.language_model._reorder_cache(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/configuration_vivit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/configuration_vivit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..697a9a3a135c1d023bfbe60aca91e49d99e3e083
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/configuration_vivit.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/modeling_vivit.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/modeling_vivit.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10019f4128d0ef615e624afcec8b5152e0c8579b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vivit/__pycache__/modeling_vivit.cpython-310.pyc differ