diff --git a/.gitattributes b/.gitattributes
index 119c673093e5ad67dc8cce398c183a60cf1511a1..e0acd198b31bbec2cb06f206e0fe70e1d6833978 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -86,3 +86,4 @@ venv/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so f
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6ccd2f54435312d0c5ca4601fbe5724c5cf5a63a
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/12.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c2c6372060aa5a9a5d7a3729372f220371e10204228a5324d4c856143612ec7f
+size 33555533
diff --git a/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..84a45a3de43c826256fd9677379ec1d07f25fc3c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6aa5aac467540abc95e56cff0883619ff97057048941daef673b28421a4a2aa0
+size 33555612
diff --git a/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6bf7cd7d4fb461ac3cf4ee9cd30d8bb6413f149c
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:794dab55b3801c5d099c4c17318b75f4c1fa3b2c3925b0c07f01ad345300fa26
+size 33555627
diff --git a/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..73c4864a12515a743e76636b8cda144ef0582f33
--- /dev/null
+++ b/ckpts/universal/global_step20/zero/22.mlp.dense_h_to_4h.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5b12327ec966e04b84f9d8b439ee3db92bcc0e21332173187fa78b6fcd096375
+size 33555533
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_2-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_2-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..72ab237e58550ef3d5f57edcc44f716e0ebece64
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_2-v0-loglikelihood
@@ -0,0 +1 @@
+123e2acd00fbba60aba1fbae607c79a062e512c9e79c7d8dfafff63e30111d76
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..1fddc2190c85c0161921a5a4026cd518445fc386
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_distractor_agreement_relative_clause-v0-loglikelihood
@@ -0,0 +1 @@
+bf78e2b53c0f3531303c668c96bd3897a0a35e960da37439e63724ecba4e371a
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..076319f01e4309fae1bebb80834d35ebdebec6ec
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_existential_there_quantifiers_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_quantifiers_1": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_adjectives-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_adjectives-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..a030be1d72c6a2d1794464b4c9b0cf2e48454197
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_adjectives-v0-loglikelihood
@@ -0,0 +1 @@
+47c56f336df11924d8b97feb46339ce55bea4b216b6fd13946cc999ea36a4a95
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_adjectives-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_adjectives-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..e3b8718ff8cee5d379a4ec8e8bda05b8a8d3e8b8
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_adjectives-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_irregular_past_participle_adjectives": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_irregular_past_participle_adjectives": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..3b0f9763529ee45a97ab0abdfd18efc9fe991241
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood
@@ -0,0 +1 @@
+d255a10a34f14d77d9526604a17b0f6747d32f62fc2e3a09e9ab10054535fd45
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..1a88f8fa87a86cbff20d0def9955059e9cd73861
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-loglikelihood
@@ -0,0 +1 @@
+d5486ffcc075cad4302e37ece9bbf5b2063c0b5a48e76c8e1dd365e22a5a48fc
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..f83ed1fb7413ddccae66c32078a9a5f7b19eb03e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-loglikelihood
@@ -0,0 +1 @@
+37483dfda688b62ad27161c9fc1e1e7710c5a6e6a7cd3474df119bcafd30e97f
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_age-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_age-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..598d2cce10cc3ecefb6eb8d1deb74801e25b11af
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_age-v0-loglikelihood
@@ -0,0 +1 @@
+de74d2ac7f926f2f486c045d84aae8f71711102f9d77b31f758fd148810d13d3
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..b56bc901ca48380f5a188f9c18ef12ba0abe49ca
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-loglikelihood
@@ -0,0 +1 @@
+2ed57377174adaf0fb30037eb055eafdd02cd46e57bc32066d5fecd90a14b6e1
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_age-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_age-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..dbec353c35db547d54e918c718164a0788abc569
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_age-v0-loglikelihood
@@ -0,0 +1 @@
+b14a5769f415a234abe89063a1b546aa4a990c84217e5d4a697874cd7f85af35
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_gender-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_gender-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..c1713a5a881657c9ae4417f6adcf7480491a2915
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_gender-v0-loglikelihood
@@ -0,0 +1 @@
+010b8404655911c86555616da23afffce9dc3981e1acbbfdb022d9c474430209
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_sexual_orientation-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french_sexual_orientation-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..5bb8a4336d89c12896186dc53f0bdd7f480c8df0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_sexual_orientation-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french_sexual_orientation": {"likelihood_difference": 0.3160680928470684, "likelihood_difference_stderr": 0.02397758321605678, "pct_stereotype": 0.43956043956043955, "pct_stereotype_stderr": 0.05231815698566189}}, "versions": {"crows_pairs_french_sexual_orientation": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..7372018798d522cdfda7e458f1d608f1a3c13169
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french_socioeconomic": {"likelihood_difference": 0.3394681494647815, "likelihood_difference_stderr": 0.01702488895584347, "pct_stereotype": 0.4642857142857143, "pct_stereotype_stderr": 0.035714285714285705}}, "versions": {"crows_pairs_french_socioeconomic": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/drop-v0-greedy_until b/lm-evaluation-harness/tests/testdata/drop-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..6470b349d2e2a54c1ab113346885eb97c045a0ed
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/drop-v0-greedy_until
@@ -0,0 +1 @@
+ca566c630d8ac853d5785d4b5c40a5137172c34b48af3350e1f79e6d548b36ba
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/ethics_justice-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/ethics_justice-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..cc18a7e67b6f38aaf759bb9073314da42b86f992
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/ethics_justice-v0-loglikelihood
@@ -0,0 +1 @@
+d7dfc44fea507b5c5c3a8218f79ed8197da8599ebb396d85feb91c25512126b6
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..fb6835039c9d68b5cf5d52244a349c1b8a964c5c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_macroeconomics": {"acc": 0.2230769230769231, "acc_norm": 0.22564102564102564, "acc_norm_stderr": 0.021193632525148522, "acc_stderr": 0.021107730127244}}, "versions": {"hendrycksTest-high_school_macroeconomics": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..091d7352ce1b260f6acbd1338b7d54c5716d23ce
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-human_sexuality": {"acc": 0.22137404580152673, "acc_norm": 0.22900763358778625, "acc_norm_stderr": 0.036853466317118506, "acc_stderr": 0.0364129708131373}}, "versions": {"hendrycksTest-human_sexuality": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-marketing-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-marketing-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..2cc7a93f1c3c2b4747d4ce739ffbcd522fc50224
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-marketing-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-marketing": {"acc": 0.2863247863247863, "acc_norm": 0.2905982905982906, "acc_norm_stderr": 0.029745048572674043, "acc_stderr": 0.029614323690456648}}, "versions": {"hendrycksTest-marketing": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..b09e99721b8ec71dc85c7ed0798d55a6e0274860
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-loglikelihood
@@ -0,0 +1 @@
+972dd88dbbaf09d14766e243cfc233425e7c01a26dbc61bdb9eeefa788822331
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-nutrition-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-nutrition-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..2716bebe69e1c3884ba2e88056c87c5a5268b53e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-nutrition-v0-loglikelihood
@@ -0,0 +1 @@
+19e49d218f55ed5ec4bd1a6cd3f3388c6f620b81484e7abe8b298e5481c3044d
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..3555c2c5351eb369bf0dc9cfedf93f0bbc3de7b4
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-loglikelihood
@@ -0,0 +1 @@
+0ffa491f7bad2abbb64ecd752a295729167599b3815238cab0ecf4cb08bba9b6
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-greedy_until b/lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..fc59546576857b7f52dd4bfbdfc661c8ce871a6a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-greedy_until
@@ -0,0 +1 @@
+b20adbcd2c6d135e28600b427113532c5df624cb3a90e8c5e48715c09a3a38fa
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_mt_fr-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_mt_fr-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..637c23500b9c153fe74ad9cb0369bd57f22d80a0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_mt_fr-v0-res.json
@@ -0,0 +1 @@
+{"results": {"lambada_mt_fr": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_fr": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_it-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_it-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..2e7f6ef516e5e59af82f1768cfde132d57c1a1ec
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_it-v0-res.json
@@ -0,0 +1 @@
+{"results": {"lambada_openai_mt_it": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_it": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_num_theory-v1-res.json b/lm-evaluation-harness/tests/testdata/math_num_theory-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..00917b90ddb0602c62c8a9fef959b9e91eb45c2e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_num_theory-v1-res.json
@@ -0,0 +1 @@
+{"results": {"math_num_theory": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_num_theory": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..b37a91cc2dea829e8dab7bb0fe934442c54b3a26
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+5c17ddfebeab8c41dabadb6fc216ceda91e3fe5dc95aaf1b2c843d7f11828b03
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_books3-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_books3-v0-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..b483d3b45b43abddd6cbd169a8afda8d3f803d9c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_books3-v0-loglikelihood_rolling
@@ -0,0 +1 @@
+0f8f36f705b999b6d55fa72ff89a82793dd1cb568ab1f8727a6a2086a12b9410
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_europarl-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_europarl-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..b948f0d3691443f50c9f9d5ae24804b0c7e79aaa
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_europarl-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_europarl": {"bits_per_byte": 1.2477664839621123e-05, "byte_perplexity": 1.000008648895605, "word_perplexity": 1.000063506523818}}, "versions": {"pile_europarl": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..22046e440584d0df85ceeed057ad2c0633273782
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-loglikelihood_rolling
@@ -0,0 +1 @@
+5d6c19665f429ab1ccbe027da67f42bdaf219f819ab093673976eee55e015ff4
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..21b6bb451fe376e62899f22ea422b3ce9cada469
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_pubmed-abstracts": {"bits_per_byte": 0.0005417858444030858, "byte_perplexity": 1.0003756078534862, "word_perplexity": 1.0025884332779}}, "versions": {"pile_pubmed-abstracts": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qnli-v0-res.json b/lm-evaluation-harness/tests/testdata/qnli-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..31c3097605f33c489d4f2552ce3060cd7a9155e3
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qnli-v0-res.json
@@ -0,0 +1 @@
+{"results": {"qnli": {"acc": 0.5108914515833791, "acc_stderr": 0.00676380528502966}}, "versions": {"qnli": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qqp-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/qqp-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..ecc86dc396332c1aaa8e638e5413633a504e7206
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qqp-v0-loglikelihood
@@ -0,0 +1 @@
+97b551b0fc3d239aad4929a2e8e79c986891aefd9fcd19441fea0382d507889e
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/triviaqa-v1-loglikelihood b/lm-evaluation-harness/tests/testdata/triviaqa-v1-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..d576c4977fc769dc56c31340f07558fefc1f1459
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/triviaqa-v1-loglikelihood
@@ -0,0 +1 @@
+f8ec05b306b9f6187c0f8117cae441fb85a7a2e4670f4f9a1a3b632b1978421a
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt16-ro-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt16-ro-en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..267763793d5fa5a16c41cbcdd9eb7b134cd34cea
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt16-ro-en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt16-ro-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.01262029828861831, "chrf_stderr": 0.00014507496111350828, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt16-ro-en": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-de-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-de-en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..790424fe4f226224642530ba7fd53a59eec4caa0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-de-en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-de-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.006703243310670055, "chrf_stderr": 0.0001292711927988445, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-de-en": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wsc273-v0-res.json b/lm-evaluation-harness/tests/testdata/wsc273-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..8f023b422a7003d2984e35e58045d8866954a4c4
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wsc273-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wsc273": {"acc": 0.5164835164835165, "acc_stderr": 0.0303004740355766}}, "versions": {"wsc273": 0}}
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11 b/venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11
new file mode 100644
index 0000000000000000000000000000000000000000..ecf81f1c12f2451b00d40ce4f37c526f7063fc31
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/nvidia/cufft/lib/libcufftw.so.11
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab06d9dfcfaf88ec2bcfb4c16b76ff0bf3b2728370d212e28607f53e1d40eff5
+size 1614344
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86d857b1e9a26d958b5ab44a0539bae1f182473d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__init__.py
@@ -0,0 +1,142 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_blenderbot": [
+ "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "BlenderbotConfig",
+ "BlenderbotOnnxConfig",
+ ],
+ "tokenization_blenderbot": ["BlenderbotTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_blenderbot"] = [
+ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "BlenderbotForCausalLM",
+ "BlenderbotForConditionalGeneration",
+ "BlenderbotModel",
+ "BlenderbotPreTrainedModel",
+ ]
+
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_blenderbot"] = [
+ "TFBlenderbotForConditionalGeneration",
+ "TFBlenderbotModel",
+ "TFBlenderbotPreTrainedModel",
+ ]
+
+
+try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_flax_blenderbot"] = [
+ "FlaxBlenderbotForConditionalGeneration",
+ "FlaxBlenderbotModel",
+ "FlaxBlenderbotPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_blenderbot import (
+ BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ BlenderbotConfig,
+ BlenderbotOnnxConfig,
+ )
+ from .tokenization_blenderbot import BlenderbotTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_blenderbot import (
+ BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ BlenderbotForCausalLM,
+ BlenderbotForConditionalGeneration,
+ BlenderbotModel,
+ BlenderbotPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_blenderbot import (
+ TFBlenderbotForConditionalGeneration,
+ TFBlenderbotModel,
+ TFBlenderbotPreTrainedModel,
+ )
+
+ try:
+ if not is_flax_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_flax_blenderbot import (
+ FlaxBlenderbotForConditionalGeneration,
+ FlaxBlenderbotModel,
+ FlaxBlenderbotPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..779cf58f2f93edafda7e897cc03b9e6f0ce8060e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dea879343006446b2c38fa97e71a5f6e988ec17e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/configuration_blenderbot.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7f3bf120da91f7384f1dae27322840fc868c803
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3dcb1fa9fbe50e63b529b26eaf67e4de44ca8832
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_blenderbot.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f36f8f3c4b0ea468ca7b762179757924b50c7398
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_flax_blenderbot.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5213d3128327693ae8d307b46d77045e5f91e514
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/modeling_tf_blenderbot.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3fad447ee4c874da42a9fa0e5358ed36e84ecc7d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4702d374cec4ca61d9f118d1c5a1561e21ba4a69
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/__pycache__/tokenization_blenderbot_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..00608710592998db8d4bde42a73f621e30431f90
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/configuration_blenderbot.py
@@ -0,0 +1,395 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Blenderbot model configuration"""
+
+from collections import OrderedDict
+from typing import Any, Mapping, Optional
+
+from ... import PreTrainedTokenizer
+from ...configuration_utils import PretrainedConfig
+from ...file_utils import TensorType, is_torch_available
+from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
+from ...onnx.utils import compute_effective_axis_dimension
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class BlenderbotConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BlenderbotModel`]. It is used to instantiate a
+ Blenderbot model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the Blenderbot
+ [facebook/blenderbot-3B](https://huggingface.co/facebook/blenderbot-3B) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 8008):
+ Vocabulary size of the Blenderbot model. Defines the number of different tokens that can be represented by
+ the `input_ids` passed when calling [`BlenderbotModel`] or [`TFBlenderbotModel`].
+ d_model (`int`, *optional*, defaults to 2560):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 2):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 24):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 32):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 10240):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 10240):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in encoder.
+ activation_function (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 128):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder. See the [LayerDrop paper](https://arxiv.org/abs/1909.11556)
+ for more details.
+ scale_embedding (`bool`, *optional*, defaults to `False`):
+ Scale embeddings by dividing by sqrt(d_model).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Example:
+
+ ```python
+ >>> from transformers import BlenderbotConfig, BlenderbotModel
+
+ >>> # Initializing a Blenderbot facebook/blenderbot-3B style configuration
+ >>> configuration = BlenderbotConfig()
+
+ >>> # Initializing a model (with random weights) from the facebook/blenderbot-3B style configuration
+ >>> model = BlenderbotModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "blenderbot"
+ keys_to_ignore_at_inference = ["past_key_values"]
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ def __init__(
+ self,
+ vocab_size=8008,
+ max_position_embeddings=128,
+ encoder_layers=2,
+ encoder_ffn_dim=10240,
+ encoder_attention_heads=32,
+ decoder_layers=24,
+ decoder_ffn_dim=10240,
+ decoder_attention_heads=32,
+ encoder_layerdrop=0.0,
+ decoder_layerdrop=0.0,
+ use_cache=True,
+ is_encoder_decoder=True,
+ activation_function="gelu",
+ d_model=2560,
+ dropout=0.1,
+ attention_dropout=0.0,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=1,
+ scale_embedding=False,
+ pad_token_id=0,
+ bos_token_id=1,
+ eos_token_id=2,
+ encoder_no_repeat_ngram_size=3,
+ forced_eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.d_model = d_model
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.activation_function = activation_function
+ self.init_std = init_std
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.use_cache = use_cache
+ self.num_hidden_layers = encoder_layers
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ decoder_start_token_id=decoder_start_token_id,
+ encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size,
+ forced_eos_token_id=forced_eos_token_id,
+ **kwargs,
+ )
+
+
+class BlenderbotOnnxConfig(OnnxSeq2SeqConfigWithPast):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+ if self.use_past:
+ common_inputs["decoder_input_ids"] = {0: "batch"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
+ else:
+ common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
+ common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
+ if self.use_past:
+ self.fill_with_past_key_values_(common_inputs, direction="inputs")
+ elif self.task == "causal-lm":
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ]
+ )
+ if self.use_past:
+ _, num_decoder_layers = self.num_layers
+ for i in range(num_decoder_layers):
+ common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ else:
+ common_inputs = OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "encoder_sequence"}),
+ ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
+ ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
+ ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
+ ]
+ )
+
+ return common_inputs
+
+ @property
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_outputs = super().outputs
+ else:
+ common_outputs = super(OnnxConfigWithPast, self).outputs
+ if self.use_past:
+ num_encoder_layers, _ = self.num_layers
+ for i in range(num_encoder_layers):
+ common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
+ common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
+ return common_outputs
+
+ def _generate_dummy_inputs_for_default_and_seq2seq_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+ # Generate decoder inputs
+ decoder_seq_length = seq_length if not self.use_past else 1
+ decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, decoder_seq_length, is_pair, framework
+ )
+ decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
+ common_inputs = dict(**encoder_inputs, **decoder_inputs)
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, encoder_seq_length = common_inputs["input_ids"].shape
+ decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
+ num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
+ encoder_shape = (
+ batch,
+ num_encoder_attention_heads,
+ encoder_seq_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+ decoder_past_length = decoder_seq_length
+ decoder_shape = (
+ batch,
+ num_decoder_attention_heads,
+ decoder_past_length,
+ self._config.hidden_size // num_decoder_attention_heads,
+ )
+ common_inputs["decoder_attention_mask"] = torch.cat(
+ [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
+ )
+ common_inputs["past_key_values"] = []
+ _, num_decoder_layers = self.num_layers
+
+ for _ in range(num_decoder_layers):
+ common_inputs["past_key_values"].append(
+ (
+ torch.zeros(decoder_shape),
+ torch.zeros(decoder_shape),
+ torch.zeros(encoder_shape),
+ torch.zeros(encoder_shape),
+ )
+ )
+ return common_inputs
+
+ def _generate_dummy_inputs_for_causal_lm(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size, seq_length, is_pair, framework
+ )
+
+ if self.use_past:
+ if not is_torch_available():
+ raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
+ else:
+ import torch
+ batch, seqlen = common_inputs["input_ids"].shape
+ past_key_values_length = seqlen
+ _, num_decoder_layers = self.num_layers
+ num_encoder_attention_heads, _ = self.num_attention_heads
+ past_shape = (
+ batch,
+ num_encoder_attention_heads,
+ past_key_values_length,
+ self._config.hidden_size // num_encoder_attention_heads,
+ )
+ mask_dtype = common_inputs["attention_mask"].dtype
+ common_inputs["attention_mask"] = torch.cat(
+ [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
+ )
+ common_inputs["past_key_values"] = [
+ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_decoder_layers)
+ ]
+ return common_inputs
+
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._generate_dummy_inputs_for_sequence_classification_and_question_answering
+ def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ # Copied from OnnxConfig.generate_dummy_inputs
+ # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
+ # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
+ batch_size = compute_effective_axis_dimension(
+ batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
+ )
+
+ # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
+ token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
+ seq_length = compute_effective_axis_dimension(
+ seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
+ )
+
+ # Generate dummy inputs according to compute batch and sequence
+ dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
+ common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
+ return common_inputs
+
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.generate_dummy_inputs
+ def generate_dummy_inputs(
+ self,
+ tokenizer: PreTrainedTokenizer,
+ batch_size: int = -1,
+ seq_length: int = -1,
+ is_pair: bool = False,
+ framework: Optional[TensorType] = None,
+ ) -> Mapping[str, Any]:
+ if self.task in ["default", "seq2seq-lm"]:
+ common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ elif self.task == "causal-lm":
+ common_inputs = self._generate_dummy_inputs_for_causal_lm(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+ else:
+ common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
+ tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
+ )
+
+ return common_inputs
+
+ # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig._flatten_past_key_values_
+ def _flatten_past_key_values_(self, flattened_output, name, idx, t):
+ if self.task in ["default", "seq2seq-lm"]:
+ flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
+ else:
+ flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
+ flattened_output, name, idx, t
+ )
+
+ def fill_with_past_key_values_(self, inputs_or_outputs: Mapping[str, Mapping[int, str]], direction: str):
+ if direction not in ["inputs", "outputs"]:
+ raise ValueError(f'direction must either be "inputs" or "outputs", but {direction} was given')
+
+ name = "past_key_values" if direction == "inputs" else "present"
+ _, num_decoder_layers = self.num_layers
+
+ encoder_sequence = "past_encoder_sequence"
+ decoder_sequence = "past_decoder_sequence" if direction == "inputs" else "past_decoder_sequence + sequence"
+
+ for i in range(num_decoder_layers):
+ inputs_or_outputs[f"{name}.{i}.decoder.key"] = {0: "batch", 2: decoder_sequence}
+ inputs_or_outputs[f"{name}.{i}.decoder.value"] = {0: "batch", 2: decoder_sequence}
+ inputs_or_outputs[f"{name}.{i}.encoder.key"] = {0: "batch", 2: encoder_sequence}
+ inputs_or_outputs[f"{name}.{i}.encoder.value"] = {0: "batch", 2: encoder_sequence}
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5919b94d42fb3555010cc9a454b2d31ecaa52ed
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,114 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Convert Blenderbot checkpoint."""
+
+import argparse
+
+import torch
+
+from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
+from transformers.utils import logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger(__name__)
+
+PATTERNS = [
+ ["attention", "attn"],
+ ["encoder_attention", "encoder_attn"],
+ ["q_lin", "q_proj"],
+ ["k_lin", "k_proj"],
+ ["v_lin", "v_proj"],
+ ["out_lin", "out_proj"],
+ ["norm_embeddings", "layernorm_embedding"],
+ ["position_embeddings", "embed_positions"],
+ ["embeddings", "embed_tokens"],
+ ["ffn.lin", "fc"],
+]
+
+
+def rename_state_dict_key(k):
+ if k == "embeddings.weight":
+ return "shared.weight"
+
+ for parlai_name, hf_name in PATTERNS:
+ k = k.replace(parlai_name, hf_name)
+
+ if k.startswith("encoder"):
+ k = k.replace(".attn", ".self_attn")
+ k = k.replace("norm1", "self_attn_layer_norm")
+ k = k.replace("norm2", "final_layer_norm")
+ elif k.startswith("decoder"):
+ k = k.replace("norm1", "self_attn_layer_norm")
+ k = k.replace("norm2", "encoder_attn_layer_norm")
+ k = k.replace("norm3", "final_layer_norm")
+ return k
+
+
+def rename_layernorm_keys(sd):
+ keys = [
+ "model.encoder.layernorm_embedding.weight",
+ "model.encoder.layernorm_embedding.bias",
+ "model.decoder.layernorm_embedding.weight",
+ "model.decoder.layernorm_embedding.bias",
+ ]
+ for k in keys:
+ v = sd.pop(k)
+ new_k = k.replace("layernorm_embedding", "layer_norm")
+ assert new_k not in sd
+ sd[new_k] = v
+
+
+IGNORE_KEYS = ["START"]
+
+
+@torch.no_grad()
+def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
+ """
+ Copy/paste/tweak model's weights to our Blenderbot structure.
+ """
+ model = torch.load(checkpoint_path, map_location="cpu")
+ sd = model["model"]
+ cfg = BlenderbotConfig.from_json_file(config_json_path)
+ m = BlenderbotForConditionalGeneration(cfg)
+ valid_keys = m.model.state_dict().keys()
+ failures = []
+ mapping = {}
+ for k, v in sd.items():
+ if k in IGNORE_KEYS:
+ continue
+
+ new_k = rename_state_dict_key(k)
+ if new_k not in valid_keys:
+ failures.append([k, new_k])
+ else:
+ mapping[new_k] = v
+ if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
+ rename_layernorm_keys(sd)
+ m.model.load_state_dict(mapping, strict=True)
+ m.half()
+ m.save_pretrained(pytorch_dump_folder_path)
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
+ parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
+ parser.add_argument(
+ "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
+ )
+ args = parser.parse_args()
+ convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..5fa17abcdd294e0d5a5ac27c095165bfbd5d0937
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_blenderbot.py
@@ -0,0 +1,1597 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Blenderbot model."""
+
+
+import copy
+import math
+import os
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from ..blenderbot_small import BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel
+from .configuration_blenderbot import BlenderbotConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "BlenderbotConfig"
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
+
+
+from ..deprecated._archive_maps import BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.bart.modeling_bart.shift_tokens_right
+def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = input_ids.new_zeros(input_ids.shape)
+ shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
+ shifted_input_ids[:, 0] = decoder_start_token_id
+
+ if pad_token_id is None:
+ raise ValueError("self.model.config.pad_token_id has to be defined.")
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)
+
+ return shifted_input_ids
+
+
+class BlenderbotLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ super().__init__(num_embeddings, embedding_dim)
+
+ def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0):
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ bsz, seq_len = input_ids_shape[:2]
+ positions = torch.arange(
+ past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device
+ )
+ return super().forward(positions)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Blenderbot
+class BlenderbotAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[BlenderbotConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
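+# Shape walk-through for one BlenderbotAttention call (illustrative sizes: bsz=2, tgt_len=5,
+# embed_dim=1280, num_heads=32, so head_dim=40): queries, keys and values are reshaped to
+# (bsz * num_heads, -1, head_dim) = (64, seq, 40), attn_weights comes out as (64, 5, src_len),
+# and the context is folded back to (2, 5, 1280) before out_proj is applied.
+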
+
+BLENDERBOT_ATTENTION_CLASSES = {"eager": BlenderbotAttention}
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
+class BlenderbotEncoderLayer(nn.Module):
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.encoder_attention_heads,
+ dropout=config.attention_dropout,
+ config=config,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_head_mask: torch.Tensor,
+ output_attentions: bool = False,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ if hidden_states.dtype == torch.float16 and (
+ torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any()
+ ):
+ clamp_value = torch.finfo(hidden_states.dtype).max - 1000
+ hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer with MBart->Blenderbot, MBART->BLENDERBOT
+class BlenderbotDecoderLayer(nn.Module):
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ is_causal=True,
+ config=config,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.encoder_attn = BLENDERBOT_ATTENTION_CLASSES[config._attn_implementation](
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ is_decoder=True,
+ config=config,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.Tensor] = None,
+ encoder_attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> torch.Tensor:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`torch.FloatTensor`):
+ cross attention input to the layer of shape `(batch, seq_len, embed_dim)`
+ encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(decoder_attention_heads,)`.
+ cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of
+ size `(decoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
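+# Cache layout sketch for BlenderbotDecoderLayer (when use_cache=True): present_key_value is a
+# 4-tuple (self_attn_key, self_attn_value, cross_attn_key, cross_attn_value). The first two
+# entries grow by one position per generated token, while the last two keep the encoder sequence
+# length and are reused unchanged on every decoding step.
+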
+
+class BlenderbotPreTrainedModel(PreTrainedModel):
+ config_class = BlenderbotConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def dummy_inputs(self):
+ pad_token = self.config.pad_token_id
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
+ dummy_inputs = {
+ "attention_mask": input_ids.ne(pad_token),
+ "input_ids": input_ids,
+ "decoder_input_ids": input_ids,
+ }
+ return dummy_inputs
+
+
+BLENDERBOT_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`BlenderbotConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_GENERATION_EXAMPLE = r"""
+ Conversation example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BlenderbotForConditionalGeneration
+
+ >>> mname = "facebook/blenderbot-400M-distill"
+ >>> model = BlenderbotForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> print("Human: ", UTTERANCE)
+ Human: My friends are cool but they eat too many carbs.
+
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="pt")
+ >>> reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
+ Bot: That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?
+
+ >>> REPLY = "I'm not sure"
+ >>> print("Human: ", REPLY)
+ Human: I'm not sure
+
+ >>> NEXT_UTTERANCE = (
+ ... "My friends are cool but they eat too many carbs. That's unfortunate. "
+ ... "Are they trying to lose weight or are they just trying to be healthier? "
+ ... " I'm not sure."
+ ... )
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="pt")
+ >>> next_reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
+ Bot: I see. Well, it's good that they're trying to change their eating habits.
+ ```
+"""
+
+BLENDERBOT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+ decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
+ Tuple consisting of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`).
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states
+ at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class BlenderbotEncoder(BlenderbotPreTrainedModel):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`BlenderbotEncoderLayer`].
+
+ Args:
+ config: BlenderbotConfig
+ embed_tokens (nn.Embedding): input token embedding
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+
+ embed_dim = config.d_model
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ self.embed_tokens = embed_tokens
+ else:
+ self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx)
+
+ self.embed_positions = BlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ embed_dim,
+ )
+ self.layers = nn.ModuleList([BlenderbotEncoderLayer(config) for _ in range(config.encoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_shape)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ if head_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ to_drop = False
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop: # skip the layer
+ to_drop = True
+
+ if to_drop:
+ layer_outputs = (None, None)
+ else:
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ (head_mask[idx] if head_mask is not None else None),
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # add final layer norm
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
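+# LayerDrop sketch for the encoder loop above (toy value): with config.encoder_layerdrop=0.1,
+# each encoder layer is independently skipped with probability 0.1 during training; at inference
+# self.training is False, so every layer always runs (see https://arxiv.org/abs/1909.11556).
+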
+
+class BlenderbotDecoder(BlenderbotPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`BlenderbotDecoderLayer`]
+
+ Args:
+ config: BlenderbotConfig
+ embed_tokens (nn.Embedding): input token embedding
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[nn.Embedding] = None):
+ super().__init__(config)
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_target_positions = config.max_position_embeddings
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+
+ if embed_tokens is not None:
+ self.embed_tokens = embed_tokens
+ else:
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx)
+
+ self.embed_positions = BlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ )
+ self.layers = nn.ModuleList([BlenderbotDecoderLayer(config) for _ in range(config.decoder_layers)])
+ self.layer_norm = nn.LayerNorm(config.d_model)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ def forward(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ ):
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder to avoid performing
+ cross-attention on hidden heads. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # expand encoder attention mask
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _prepare_4d_attention_mask(
+ encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
+ )
+
+ # embed positions
+ positions = self.embed_positions(input_shape, past_key_values_length)
+
+ hidden_states = inputs_embeds + positions
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+ next_decoder_cache = () if use_cache else None
+
+ # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ if attn_mask.size()[0] != len(self.layers):
+ raise ValueError(
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {attn_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(
+ cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None
+ ),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add final layer norm
+ hidden_states = self.layer_norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Blenderbot Model outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class BlenderbotModel(BlenderbotPreTrainedModel):
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight"]
+
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__(config)
+
+ padding_idx, vocab_size = config.pad_token_id, config.vocab_size
+ self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx)
+
+ self.encoder = BlenderbotEncoder(config, self.shared)
+ self.decoder = BlenderbotDecoder(config, self.shared)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `BlenderbotSmallModel.from_pretrained('facebook/small_blenderbot-90M')` instead.",
+ FutureWarning,
+ )
+ return BlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)
+
+ return super(BlenderbotModel, cls).from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, value):
+ self.shared = value
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BlenderbotModel
+
+ >>> model = BlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> inputs = tokenizer("Studies have been shown that owning a dog is good for you", return_tensors="pt")
+ >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1
+ >>> outputs = model(input_ids=inputs.input_ids, decoder_input_ids=decoder_input_ids)
+
+ >>> last_hidden_states = outputs.last_hidden_state
+ >>> list(last_hidden_states.shape)
+ [1, 6, 1280]
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ "The Blenderbot Model with a language modeling head. Can be used for conversational response generation.", BLENDERBOT_START_DOCSTRING
+)
+class BlenderbotForConditionalGeneration(BlenderbotPreTrainedModel):
+ base_model_prefix = "model"
+ _keys_to_ignore_on_load_missing = ["final_logits_bias"]
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config: BlenderbotConfig):
+ super().__init__(config)
+ self.model = BlenderbotModel(config)
+ self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings)))
+ self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `BlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')` instead.",
+ FutureWarning,
+ )
+ return BlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)
+
+ return super(BlenderbotForConditionalGeneration, cls).from_pretrained(
+ pretrained_model_name_or_path, *model_args, **kwargs
+ )
+
+ def get_encoder(self):
+ return self.model.get_encoder()
+
+ def get_decoder(self):
+ return self.model.get_decoder()
+
+ def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
+ new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+ self._resize_final_logits_bias(new_embeddings.weight.shape[0])
+ return new_embeddings
+
+ def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
+ old_num_tokens = self.final_logits_bias.shape[-1]
+ if new_num_tokens <= old_num_tokens:
+ new_bias = self.final_logits_bias[:, :new_num_tokens]
+ else:
+ extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
+ new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
+ self.register_buffer("final_logits_bias", new_bias)
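+ # Resizing sketch (hypothetical sizes): growing the vocabulary from 8008 to 8010 tokens
+ # appends two zeros to `final_logits_bias`, while shrinking truncates it, so the bias always
+ # matches the output dimension of `lm_head`.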
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Union[Tuple, BaseModelOutput]] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ if use_cache:
+ logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past is used
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if decoder_input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = decoder_input_ids.shape[1] - 1
+
+ decoder_input_ids = decoder_input_ids[:, remove_prefix_length:]
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
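+ # Generation-step sketch (toy ids): if decoder_input_ids is [[1, 42, 77]] and the cache already
+ # covers 2 positions, only the last column [[77]] is fed to the model; if the cache is as long
+ # as (or longer than) the provided ids, only the final id is kept.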
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ # cached cross_attention states don't have to be reordered -> they are always the same
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2])
+ + layer_past[2:],
+ )
+ return reordered_past
+
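+# Beam-search sketch for `_reorder_cache` above: with beam_idx = tensor([2, 0, 1]), each layer's
+# self-attention key and value caches are re-indexed along the batch dimension to follow the
+# surviving beams, while the last two entries of the tuple (the cross-attention cache) are left
+# untouched because they only depend on the encoder input.
+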
+
+# Copied from transformers.models.bart.modeling_bart.BartDecoderWrapper with Bart->Blenderbot
+class BlenderbotDecoderWrapper(BlenderbotPreTrainedModel):
+ """
+ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
+ used in combination with the [`EncoderDecoderModel`] framework.
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+ self.decoder = BlenderbotDecoder(config)
+
+ def forward(self, *args, **kwargs):
+ return self.decoder(*args, **kwargs)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartForCausalLM with Bart->Blenderbot, facebook/bart-base->facebook/blenderbot-400M-distill
+class BlenderbotForCausalLM(BlenderbotPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config):
+ config = copy.deepcopy(config)
+ config.is_decoder = True
+ config.is_encoder_decoder = False
+ super().__init__(config)
+ self.model = BlenderbotDecoderWrapper(config)
+
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model.decoder = decoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ if the model is configured as a decoder.
+ encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used
+ in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of
+ shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional
+ tensors are only required when the model is used as a decoder in a Sequence to Sequence model.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the
+ cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, BlenderbotForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> model = BlenderbotForCausalLM.from_pretrained("facebook/blenderbot-400M-distill", add_cross_attention=False)
+ >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder."
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs)
+
+ >>> logits = outputs.logits
+ >>> expected_shape = [1, inputs.input_ids.shape[-1], model.config.vocab_size]
+ >>> list(logits.shape) == expected_shape
+ True
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ head_mask=head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ logits = self.lm_head(outputs[0])
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs
+ ):
+ # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
+ if attention_mask is None:
+ attention_mask = input_ids.new_ones(input_ids.shape)
+
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+        return {
+            "input_ids": input_ids,
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": use_cache,
+ }
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
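+        # Reorder the cached key/value states along the batch dimension so that they
+        # follow the beam indices selected at the current generation step.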
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..61239335be3b639eb65520aa51f97986938633c9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_flax_blenderbot.py
@@ -0,0 +1,1505 @@
+# coding=utf-8
+# Copyright 2021 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Flax Blenderbot model."""
+
+import math
+import random
+from functools import partial
+from typing import Callable, Optional, Tuple
+
+import flax.linen as nn
+import jax
+import jax.numpy as jnp
+from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
+from flax.linen import combine_masks, make_causal_mask
+from flax.linen.attention import dot_product_attention_weights
+from flax.traverse_util import flatten_dict, unflatten_dict
+from jax import lax
+from jax.random import PRNGKey
+
+from ...modeling_flax_outputs import (
+ FlaxBaseModelOutput,
+ FlaxBaseModelOutputWithPastAndCrossAttentions,
+ FlaxCausalLMOutputWithCrossAttentions,
+ FlaxSeq2SeqLMOutput,
+ FlaxSeq2SeqModelOutput,
+)
+from ...modeling_flax_utils import (
+ ACT2FN,
+ FlaxPreTrainedModel,
+ append_call_sample_docstring,
+ append_replace_return_docstrings,
+ overwrite_call_docstring,
+)
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_blenderbot import BlenderbotConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "BlenderbotConfig"
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
+
+
+BLENDERBOT_START_DOCSTRING = r"""
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a Flax Linen
+ [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a
+ regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior.
+
+ Finally, this model supports inherent JAX features such as:
+
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
+
+ Parameters:
+ config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+            If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+BLENDERBOT_ENCODE_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+BLENDERBOT_DECODE_INPUTS_DOCSTRING = r"""
+ Args:
+ decoder_input_ids (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ For translation and summarization training, `decoder_input_ids` should be provided. If no
+ `decoder_input_ids` is provided, the model will create this tensor by shifting the `input_ids` to the right
+ for denoising pre-training following the paper.
+        encoder_outputs (`tuple(tuple(jnp.ndarray))`):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+            `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of
+ hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ encoder_attention_mask (`jnp.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_attention_mask (`jnp.ndarray` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+
+            If you want to change padding behavior, you should modify it to your needs. See diagram 1 in [the
+ paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy.
+ decoder_position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`):
+ Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast
+ auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
+def shift_tokens_right(input_ids: jnp.ndarray, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
+ """
+ Shift input ids one token to the right.
+ """
+ shifted_input_ids = jnp.zeros_like(input_ids)
+ shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
+ shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
+
+ shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
+ return shifted_input_ids
+
+
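+# Illustrative sketch (not part of the upstream module): a minimal, hypothetical helper
+# showing what `shift_tokens_right` produces. Assuming pad_token_id=0 and
+# decoder_start_token_id=1, labels [[10, 20, 2]] become decoder inputs [[1, 10, 20]],
+# and any -100 label positions would be replaced by the pad token id.
+def _example_shift_tokens_right() -> jnp.ndarray:
+    labels = jnp.array([[10, 20, 2]], dtype="i4")
+    return shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=1)
+
+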
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention with Bart->Blenderbot
+class FlaxBlenderbotAttention(nn.Module):
+ config: BlenderbotConfig
+ embed_dim: int
+ num_heads: int
+ dropout: float = 0.0
+ causal: bool = False
+ bias: bool = True
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self) -> None:
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {self.num_heads})."
+ )
+
+ dense = partial(
+ nn.Dense,
+ self.embed_dim,
+ use_bias=self.bias,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.q_proj, self.k_proj, self.v_proj = dense(), dense(), dense()
+ self.out_proj = dense()
+
+ self.dropout_layer = nn.Dropout(rate=self.dropout)
+
+ if self.causal:
+ self.causal_mask = make_causal_mask(
+ jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool"
+ )
+
+ def _split_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
+
+ def _merge_heads(self, hidden_states):
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
+
+ @nn.compact
+ def _concatenate_to_cache(self, key, value, query, attention_mask):
+ """
+ This function takes projected key, value states from a single input token and concatenates the states to cached
+        states from previous steps. This function is slightly adapted from the official Flax repository:
+ https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252
+ """
+ # detect if we're initializing by absence of existing cache data.
+ is_initialized = self.has_variable("cache", "cached_key")
+ cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype)
+ cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype)
+ cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
+
+ if is_initialized:
+ *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape
+ # update key, value caches with our new 1d spatial slices
+ cur_index = cache_index.value
+ indices = (0,) * len(batch_dims) + (cur_index, 0, 0)
+ key = lax.dynamic_update_slice(cached_key.value, key, indices)
+ value = lax.dynamic_update_slice(cached_value.value, value, indices)
+ cached_key.value = key
+ cached_value.value = value
+ num_updated_cache_vectors = query.shape[1]
+ cache_index.value = cache_index.value + num_updated_cache_vectors
+ # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements.
+ pad_mask = jnp.broadcast_to(
+ jnp.arange(max_length) < cur_index + num_updated_cache_vectors,
+ tuple(batch_dims) + (1, num_updated_cache_vectors, max_length),
+ )
+ attention_mask = combine_masks(pad_mask, attention_mask)
+ return key, value, attention_mask
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ key_value_states: Optional[jnp.ndarray] = None,
+ attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ batch_size = hidden_states.shape[0]
+
+ # get query proj
+ query_states = self.q_proj(hidden_states)
+ # get key, value proj
+ if is_cross_attention:
+ # cross_attentions
+ key_states = self.k_proj(key_value_states)
+ value_states = self.v_proj(key_value_states)
+ else:
+ # self_attention
+ key_states = self.k_proj(hidden_states)
+ value_states = self.v_proj(hidden_states)
+
+ query_states = self._split_heads(query_states)
+ key_states = self._split_heads(key_states)
+ value_states = self._split_heads(value_states)
+
+ # handle cache prepare causal attention mask
+ if self.causal:
+ query_length, key_length = query_states.shape[1], key_states.shape[1]
+ if self.has_variable("cache", "cached_key"):
+ mask_shift = self.variables["cache"]["cache_index"]
+ max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
+ causal_mask = lax.dynamic_slice(
+ self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length)
+ )
+ else:
+ causal_mask = self.causal_mask[:, :, :query_length, :key_length]
+ causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:])
+
+ # combine masks if needed
+ if attention_mask is not None and self.causal:
+ attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape)
+ attention_mask = combine_masks(attention_mask, causal_mask)
+ elif self.causal:
+ attention_mask = causal_mask
+ elif attention_mask is not None:
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
+
+ # During fast autoregressive decoding, we feed one position at a time,
+ # and cache the keys and values step by step.
+ if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
+ key_states, value_states, attention_mask = self._concatenate_to_cache(
+ key_states, value_states, query_states, attention_mask
+ )
+
+ # Convert the boolean attention mask to an attention bias.
+ if attention_mask is not None:
+ # attention mask in the form of attention bias
+ attention_bias = lax.select(
+ attention_mask > 0,
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
+ )
+ else:
+ attention_bias = None
+
+ dropout_rng = None
+ if not deterministic and self.dropout > 0.0:
+ dropout_rng = self.make_rng("dropout")
+
+ attn_weights = dot_product_attention_weights(
+ query_states,
+ key_states,
+ bias=attention_bias,
+ dropout_rng=dropout_rng,
+ dropout_rate=self.dropout,
+ broadcast_dropout=True,
+ deterministic=deterministic,
+ dtype=self.dtype,
+ precision=None,
+ )
+
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
+ attn_output = self._merge_heads(attn_output)
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights
+
+
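+# Illustrative sketch (not part of the upstream module): the cache update in
+# `FlaxBlenderbotAttention._concatenate_to_cache` writes the key/value states of the
+# current step into a pre-allocated buffer at offset `cache_index` via
+# `lax.dynamic_update_slice`. A minimal 1-D analogue of that primitive:
+def _example_dynamic_cache_update() -> jnp.ndarray:
+    cache = jnp.zeros(6)  # pre-allocated buffer, analogous to `cached_key`/`cached_value`
+    new_states = jnp.array([1.0, 2.0])  # states produced at the current decoding step
+    cur_index = 2  # analogous to the `cache_index` variable
+    # returns [0., 0., 1., 2., 0., 0.]
+    return lax.dynamic_update_slice(cache, new_states, (cur_index,))
+
+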
+# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartEncoderLayer with MBart->Blenderbot
+class FlaxBlenderbotEncoderLayer(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.d_model
+ self.self_attn = FlaxBlenderbotAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.encoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ dtype=self.dtype,
+ )
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
+ self.fc1 = nn.Dense(
+ self.config.encoder_ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, attn_weights = self.self_attn(hidden_states=hidden_states, attention_mask=attention_mask)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartEncoderLayerCollection with Bart->Blenderbot
+class FlaxBlenderbotEncoderLayerCollection(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxBlenderbotEncoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.encoder_layers)
+ ]
+ self.layerdrop = self.config.encoder_layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ deterministic: bool = True,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ all_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ for encoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if not deterministic and (dropout_probability < self.layerdrop): # skip the layer
+ layer_outputs = (None, None)
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ output_attentions,
+ deterministic,
+ )
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = (hidden_states, all_hidden_states, all_attentions)
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+
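+# Illustrative sketch (not part of the upstream module): LayerDrop, used by the layer
+# collections in this file, skips a whole layer during training with probability
+# `layerdrop`; in deterministic (inference) mode every layer always runs. A hypothetical
+# helper mirroring the skip rule used above:
+def _example_layerdrop_skip(layerdrop: float, deterministic: bool) -> bool:
+    dropout_probability = random.uniform(0, 1)
+    return (not deterministic) and dropout_probability < layerdrop
+
+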
+# Copied from transformers.models.mbart.modeling_flax_mbart.FlaxMBartDecoderLayer with MBart->Blenderbot
+class FlaxBlenderbotDecoderLayer(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32
+
+ def setup(self) -> None:
+ self.embed_dim = self.config.d_model
+ self.self_attn = FlaxBlenderbotAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.decoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ causal=True,
+ dtype=self.dtype,
+ )
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+ self.activation_fn = ACT2FN[self.config.activation_function]
+ self.activation_dropout_layer = nn.Dropout(rate=self.config.activation_dropout)
+
+ self.self_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.encoder_attn = FlaxBlenderbotAttention(
+ config=self.config,
+ embed_dim=self.embed_dim,
+ num_heads=self.config.decoder_attention_heads,
+ dropout=self.config.attention_dropout,
+ dtype=self.dtype,
+ )
+ self.encoder_attn_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+ self.fc1 = nn.Dense(
+ self.config.decoder_ffn_dim,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.fc2 = nn.Dense(
+ self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.init_std)
+ )
+ self.final_layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ hidden_states: jnp.ndarray,
+ attention_mask: jnp.ndarray,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ output_attentions: bool = True,
+ deterministic: bool = True,
+ ) -> Tuple[jnp.ndarray]:
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, init_cache=init_cache
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+ hidden_states, cross_attn_weights = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ )
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights, cross_attn_weights)
+
+ return outputs
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderLayerCollection with Bart->Blenderbot
+class FlaxBlenderbotDecoderLayerCollection(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.layers = [
+ FlaxBlenderbotDecoderLayer(self.config, name=str(i), dtype=self.dtype)
+ for i in range(self.config.decoder_layers)
+ ]
+ self.layerdrop = self.config.decoder_layerdrop
+
+ def __call__(
+ self,
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ deterministic: bool = True,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None
+
+ for decoder_layer in self.layers:
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if not deterministic and (dropout_probability < self.layerdrop):
+ layer_outputs = (None, None, None)
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ deterministic=deterministic,
+ )
+
+ hidden_states = layer_outputs[0]
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ if encoder_hidden_states is not None:
+ all_cross_attentions += (layer_outputs[2],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ outputs = [hidden_states, all_hidden_states, all_self_attns, all_cross_attentions]
+
+ if not return_dict:
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+class FlaxBlenderbotEncoder(nn.Module):
+ config: BlenderbotConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.max_source_positions = self.config.max_position_embeddings
+ self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0
+
+ self.embed_positions = nn.Embed(
+ self.config.max_position_embeddings,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.layers = FlaxBlenderbotEncoderLayerCollection(self.config, self.dtype)
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(position_ids)
+
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ outputs = self.layers(
+ hidden_states,
+ attention_mask,
+ deterministic=deterministic,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ last_hidden_states = outputs[0]
+ last_hidden_states = self.layer_norm(last_hidden_states)
+
+ # update the last element in `hidden_states` after applying `layernorm` above
+ hidden_states = None
+ if output_hidden_states:
+ hidden_states = outputs[1]
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
+
+ if not return_dict:
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutput(
+ last_hidden_state=last_hidden_states,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class FlaxBlenderbotDecoder(nn.Module):
+ config: BlenderbotConfig
+ embed_tokens: nn.Embed
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.dropout_layer = nn.Dropout(rate=self.config.dropout)
+
+ embed_dim = self.config.d_model
+ self.padding_idx = self.config.pad_token_id
+ self.max_target_positions = self.config.max_position_embeddings
+ self.embed_scale = math.sqrt(self.config.d_model) if self.config.scale_embedding else 1.0
+
+ self.embed_positions = nn.Embed(
+ self.config.max_position_embeddings,
+ embed_dim,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+
+ self.layers = FlaxBlenderbotDecoderLayerCollection(self.config, self.dtype)
+ self.layer_norm = nn.LayerNorm(dtype=self.dtype, epsilon=1e-05)
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ position_ids,
+ encoder_hidden_states: Optional[jnp.ndarray] = None,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ init_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ input_shape = input_ids.shape
+ input_ids = input_ids.reshape(-1, input_shape[-1])
+
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ # embed positions
+ positions = self.embed_positions(position_ids)
+
+ hidden_states = inputs_embeds + positions
+ hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)
+
+ outputs = self.layers(
+ hidden_states,
+ attention_mask,
+ encoder_hidden_states,
+ encoder_attention_mask,
+ deterministic=deterministic,
+ init_cache=init_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_states = outputs[0]
+ last_hidden_states = self.layer_norm(last_hidden_states)
+
+ # update the last element in `hidden_states` after applying `layernorm` above
+ hidden_states = None
+ if output_hidden_states:
+ hidden_states = outputs[1]
+ hidden_states = hidden_states[:-1] + (last_hidden_states,)
+
+ if not return_dict:
+ outputs = (last_hidden_states, hidden_states) + (outputs[2:] if output_hidden_states else outputs[1:])
+ return tuple(v for v in outputs if v is not None)
+
+ return FlaxBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=last_hidden_states,
+ hidden_states=hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartModule with Bart->Blenderbot
+class FlaxBlenderbotModule(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+
+ def setup(self):
+ self.shared = nn.Embed(
+ self.config.vocab_size,
+ self.config.d_model,
+ embedding_init=jax.nn.initializers.normal(self.config.init_std),
+ dtype=self.dtype,
+ )
+
+ self.encoder = FlaxBlenderbotEncoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+ self.decoder = FlaxBlenderbotDecoder(self.config, dtype=self.dtype, embed_tokens=self.shared)
+
+ def _get_encoder_module(self):
+ return self.encoder
+
+ def _get_decoder_module(self):
+ return self.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ decoder_outputs = self.decoder(
+ input_ids=decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return FlaxSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+
+class FlaxBlenderbotPreTrainedModel(FlaxPreTrainedModel):
+ config_class = BlenderbotConfig
+ base_model_prefix: str = "model"
+ module_class: nn.Module = None
+
+ def __init__(
+ self,
+ config: BlenderbotConfig,
+ input_shape: Tuple[int] = (1, 1),
+ seed: int = 0,
+ dtype: jnp.dtype = jnp.float32,
+ _do_init: bool = True,
+ **kwargs,
+ ):
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
+
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
+ # init input tensors
+ input_ids = jnp.zeros(input_shape, dtype="i4")
+ # make sure initialization pass will work for FlaxBlenderbotForSequenceClassificationModule
+ input_ids = input_ids.at[(..., -1)].set(self.config.eos_token_id)
+ attention_mask = jnp.ones_like(input_ids)
+ decoder_input_ids = input_ids
+ decoder_attention_mask = jnp.ones_like(input_ids)
+
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+ decoder_position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ params_rng, dropout_rng = jax.random.split(rng)
+ rngs = {"params": params_rng, "dropout": dropout_rng}
+
+ random_params = self.module.init(
+ rngs,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ )["params"]
+
+ if params is not None:
+ random_params = flatten_dict(unfreeze(random_params))
+ params = flatten_dict(unfreeze(params))
+ for missing_key in self._missing_keys:
+ params[missing_key] = random_params[missing_key]
+ self._missing_keys = set()
+ return freeze(unflatten_dict(params))
+ else:
+ return random_params
+
+ def init_cache(self, batch_size, max_length, encoder_outputs):
+ r"""
+ Args:
+ batch_size (`int`):
+ batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
+ max_length (`int`):
+ maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
+ cache.
+            encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
+ `encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
+                `attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`
+ is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
+ cross-attention of the decoder.
+ """
+ # init input variables to retrieve cache
+ decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]), decoder_input_ids.shape
+ )
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+
+ init_variables = self.module.init(
+ jax.random.PRNGKey(0),
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ init_cache=True,
+ method=_decoder_forward, # we only need to call the decoder to init the cache
+ )
+ return unfreeze(init_variables["cache"])
+
+ @add_start_docstrings(BLENDERBOT_ENCODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxBaseModelOutput, config_class=BlenderbotConfig)
+ def encode(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ def _encoder_forward(module, input_ids, attention_mask, position_ids, **kwargs):
+ encode_module = module._get_encoder_module()
+ return encode_module(input_ids, attention_mask, position_ids, **kwargs)
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ method=_encoder_forward,
+ )
+
+ @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(
+ output_type=FlaxBaseModelOutputWithPastAndCrossAttentions, config_class=BlenderbotConfig
+ )
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> last_decoder_hidden_states = outputs.last_hidden_state
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+        # If past_key_values are passed, the cache has already been initialized (e.g. via `init_cache`).
+        # The cache collection has to be marked as mutable so that it can be updated by the
+        # FlaxBlenderbotAttention module during decoding.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ return decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs, past = outputs
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs, past = outputs
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ def __call__(
+ self,
+ input_ids: jnp.ndarray,
+ attention_mask: Optional[jnp.ndarray] = None,
+ decoder_input_ids: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ position_ids: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ # prepare encoder inputs
+ if attention_mask is None:
+ attention_mask = jnp.ones_like(input_ids)
+ if position_ids is None:
+ batch_size, sequence_length = input_ids.shape
+ position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length))
+
+ # prepare decoder inputs
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(
+ input_ids, self.config.pad_token_id, decoder_start_token_id=self.config.decoder_start_token_id
+ )
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones_like(decoder_input_ids)
+ if decoder_position_ids is None:
+ batch_size, sequence_length = decoder_input_ids.shape
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {"dropout": dropout_rng} if dropout_rng is not None else {}
+
+ return self.module.apply(
+ {"params": params or self.params},
+ input_ids=jnp.array(input_ids, dtype="i4"),
+ attention_mask=jnp.array(attention_mask, dtype="i4"),
+ position_ids=jnp.array(position_ids, dtype="i4"),
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ )
+
+
+@add_start_docstrings(
+    "The bare Blenderbot Model transformer outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class FlaxBlenderbotModel(FlaxBlenderbotPreTrainedModel):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32 # the dtype of the computation
+ module_class = FlaxBlenderbotModule
+
+
+append_call_sample_docstring(FlaxBlenderbotModel, _CHECKPOINT_FOR_DOC, FlaxSeq2SeqModelOutput, _CONFIG_FOR_DOC)
+
+
+# Copied from transformers.models.bart.modeling_flax_bart.FlaxBartForConditionalGenerationModule with Bart->Blenderbot
+class FlaxBlenderbotForConditionalGenerationModule(nn.Module):
+ config: BlenderbotConfig
+ dtype: jnp.dtype = jnp.float32
+ bias_init: Callable[..., jnp.ndarray] = jax.nn.initializers.zeros
+
+ def setup(self):
+ self.model = FlaxBlenderbotModule(config=self.config, dtype=self.dtype)
+ self.lm_head = nn.Dense(
+ self.model.shared.num_embeddings,
+ use_bias=False,
+ dtype=self.dtype,
+ kernel_init=jax.nn.initializers.normal(self.config.init_std),
+ )
+ self.final_logits_bias = self.param("final_logits_bias", self.bias_init, (1, self.model.shared.num_embeddings))
+
+ def _get_encoder_module(self):
+ return self.model.encoder
+
+ def _get_decoder_module(self):
+ return self.model.decoder
+
+ def __call__(
+ self,
+ input_ids,
+ attention_mask,
+ decoder_input_ids,
+ decoder_attention_mask,
+ position_ids,
+ decoder_position_ids,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ deterministic: bool = True,
+ ):
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ position_ids=position_ids,
+ decoder_position_ids=decoder_position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=deterministic,
+ )
+
+ hidden_states = outputs[0]
+
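+        # when word embeddings are tied, reuse the shared embedding matrix (transposed)
+        # as the lm_head kernel; otherwise apply the independently parameterized lm_head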
+ if self.config.tie_word_embeddings:
+ shared_embedding = self.model.variables["params"]["shared"]["embedding"]
+ lm_logits = self.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = self.lm_head(hidden_states)
+
+ lm_logits += jax.lax.stop_gradient(self.final_logits_bias.astype(self.dtype))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return output
+
+ return FlaxSeq2SeqLMOutput(
+ logits=lm_logits,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The Blenderbot Model with a language modeling head. Can be used for summarization.", BLENDERBOT_START_DOCSTRING
+)
+class FlaxBlenderbotForConditionalGeneration(FlaxBlenderbotPreTrainedModel):
+ module_class = FlaxBlenderbotForConditionalGenerationModule
+ dtype: jnp.dtype = jnp.float32
+
+ @add_start_docstrings(BLENDERBOT_DECODE_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=FlaxCausalLMOutputWithCrossAttentions, config_class=BlenderbotConfig)
+ def decode(
+ self,
+ decoder_input_ids,
+ encoder_outputs,
+ encoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_attention_mask: Optional[jnp.ndarray] = None,
+ decoder_position_ids: Optional[jnp.ndarray] = None,
+ past_key_values: dict = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ train: bool = False,
+ params: dict = None,
+ dropout_rng: PRNGKey = None,
+ ):
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> import jax.numpy as jnp
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> text = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer(text, max_length=1024, return_tensors="jax")
+ >>> encoder_outputs = model.encode(**inputs)
+
+ >>> decoder_start_token_id = model.config.decoder_start_token_id
+ >>> decoder_input_ids = jnp.ones((inputs.input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id
+
+ >>> outputs = model.decode(decoder_input_ids, encoder_outputs)
+ >>> logits = outputs.logits
+ ```"""
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
+
+ encoder_hidden_states = encoder_outputs[0]
+ if encoder_attention_mask is None:
+ batch_size, sequence_length = encoder_hidden_states.shape[:2]
+ encoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ batch_size, sequence_length = decoder_input_ids.shape
+ if decoder_attention_mask is None:
+ decoder_attention_mask = jnp.ones((batch_size, sequence_length))
+
+ if decoder_position_ids is None:
+ if past_key_values is not None:
+ raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
+
+ decoder_position_ids = jnp.broadcast_to(
+ jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
+ )
+
+ # Handle any PRNG if needed
+ rngs = {}
+ if dropout_rng is not None:
+ rngs["dropout"] = dropout_rng
+
+ inputs = {"params": params or self.params}
+
+        # If past_key_values are passed, the cache has already been initialized (e.g. via `init_cache`).
+        # The cache collection has to be marked as mutable so that it can be updated by the
+        # FlaxBlenderbotAttention module during decoding.
+ if past_key_values:
+ inputs["cache"] = past_key_values
+ mutable = ["cache"]
+ else:
+ mutable = False
+
+ def _decoder_forward(module, decoder_input_ids, decoder_attention_mask, decoder_position_ids, **kwargs):
+ decoder_module = module._get_decoder_module()
+ outputs = decoder_module(
+ decoder_input_ids,
+ decoder_attention_mask,
+ decoder_position_ids,
+ **kwargs,
+ )
+ hidden_states = outputs[0]
+
+ if self.config.tie_word_embeddings:
+ shared_embedding = module.model.variables["params"]["shared"]["embedding"]
+ lm_logits = module.lm_head.apply({"params": {"kernel": shared_embedding.T}}, hidden_states)
+ else:
+ lm_logits = module.lm_head(hidden_states)
+
+ lm_logits += module.final_logits_bias
+ return lm_logits, outputs
+
+ outputs = self.module.apply(
+ inputs,
+ decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
+ decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
+ decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ deterministic=not train,
+ rngs=rngs,
+ mutable=mutable,
+ method=_decoder_forward,
+ )
+
+ if past_key_values is None:
+ lm_logits, decoder_outputs = outputs
+ else:
+ (lm_logits, decoder_outputs), past = outputs
+
+ if return_dict:
+ outputs = FlaxCausalLMOutputWithCrossAttentions(
+ logits=lm_logits,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+ else:
+ outputs = (lm_logits,) + decoder_outputs[1:]
+
+ # add updated cache to model output
+ if past_key_values is not None and return_dict:
+ outputs["past_key_values"] = unfreeze(past["cache"])
+ return outputs
+ elif past_key_values is not None and not return_dict:
+ outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
+
+ return outputs
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ max_length,
+ attention_mask: Optional[jax.Array] = None,
+ decoder_attention_mask: Optional[jax.Array] = None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # initializing the cache
+ batch_size, seq_length = decoder_input_ids.shape
+
+ past_key_values = self.init_cache(batch_size, max_length, encoder_outputs)
+        # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length.
+        # But since the decoder uses a causal mask, those positions are masked anyway.
+        # Thus we can create a single static attention_mask here, which is more efficient for compilation.
+ extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
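+        # When a decoder_attention_mask is provided, position ids are derived from its cumulative sum so that
+        # padded positions do not advance the position counter.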
+ if decoder_attention_mask is not None:
+ position_ids = decoder_attention_mask.cumsum(axis=-1) - 1
+ extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, decoder_attention_mask, (0, 0))
+ else:
+ position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length))
+
+ return {
+ "past_key_values": past_key_values,
+ "encoder_outputs": encoder_outputs,
+ "encoder_attention_mask": attention_mask,
+ "decoder_attention_mask": extended_attention_mask,
+ "decoder_position_ids": position_ids,
+ }
+
+ def update_inputs_for_generation(self, model_outputs, model_kwargs):
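+        # Between decoding steps only the cache and the next position id need to be carried over; the position
+        # id is simply the last one used so far advanced by one.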
+ model_kwargs["past_key_values"] = model_outputs.past_key_values
+ model_kwargs["decoder_position_ids"] = model_kwargs["decoder_position_ids"][:, -1:] + 1
+ return model_kwargs
+
+
+FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING = r"""
+ Returns:
+
+    Conversation example:
+
+ ```py
+ >>> from transformers import AutoTokenizer, FlaxBlenderbotForConditionalGeneration
+
+ >>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> inputs = tokenizer([UTTERANCE], max_length=1024, return_tensors="np")
+
+ >>> # Generate Reply
+ >>> reply_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=5, early_stopping=True).sequences
+ >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in reply_ids])
+ ```
+"""
+
+overwrite_call_docstring(
+ FlaxBlenderbotForConditionalGeneration,
+ BLENDERBOT_INPUTS_DOCSTRING + FLAX_BLENDERBOT_CONDITIONAL_GENERATION_DOCSTRING,
+)
+append_replace_return_docstrings(
+ FlaxBlenderbotForConditionalGeneration, output_type=FlaxSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC
+)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccb07d20ecf97d6d5f205669f38534c5953a946f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/modeling_tf_blenderbot.py
@@ -0,0 +1,1556 @@
+# coding=utf-8
+# Copyright 2021 The Facebook, Inc and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 Blenderbot model."""
+
+
+from __future__ import annotations
+
+import os
+import random
+import warnings
+from typing import List, Optional, Tuple, Union
+
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPastAndCrossAttentions,
+ TFSeq2SeqLMOutput,
+ TFSeq2SeqModelOutput,
+)
+
+# Public API
+from ...modeling_tf_utils import (
+ TFCausalLanguageModelingLoss,
+ TFPreTrainedModel,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_blenderbot import BlenderbotConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/blenderbot-400M-distill"
+_CONFIG_FOR_DOC = "BlenderbotConfig"
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.shift_tokens_right
+def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int, decoder_start_token_id: int):
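+    # Shift the labels one position to the right: prepend the decoder start token, drop the last token, and
+    # replace any -100 label placeholders with pad_token_id so the result can be embedded safely.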
+ pad_token_id = tf.cast(pad_token_id, input_ids.dtype)
+ decoder_start_token_id = tf.cast(decoder_start_token_id, input_ids.dtype)
+ start_tokens = tf.fill(
+ (shape_list(input_ids)[0], 1), tf.convert_to_tensor(decoder_start_token_id, input_ids.dtype)
+ )
+ shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)
+ # replace possible -100 values in labels by `pad_token_id`
+ shifted_input_ids = tf.where(
+ shifted_input_ids == -100,
+ tf.fill(shape_list(shifted_input_ids), tf.convert_to_tensor(pad_token_id, input_ids.dtype)),
+ shifted_input_ids,
+ )
+
+ # "Verify that `labels` has only positive values and -100"
+ assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.constant(0, dtype=input_ids.dtype))
+
+ # Make sure the assertion op is called by wrapping the result in an identity no-op
+ with tf.control_dependencies([assert_gte0]):
+ shifted_input_ids = tf.identity(shifted_input_ids)
+
+ return shifted_input_ids
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask
+def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0):
+ """
+    Make causal mask used for uni-directional (decoder) self-attention.
+ """
+ bsz = input_ids_shape[0]
+ tgt_len = input_ids_shape[1]
+ mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE
+ mask_cond = tf.range(shape_list(mask)[-1])
+
+ mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask)
+
+ if past_key_values_length > 0:
+ mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1)
+
+ return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1))
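+# Illustrative behaviour (example values are an assumption, not part of the original file): for tgt_len=3 and no
+# past key values, the rows of the causal mask are [0, -1e8, -1e8], [0, 0, -1e8] and [0, 0, 0], i.e. every
+# position may only attend to itself and to earlier positions.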
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
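+# Illustrative behaviour (example values are an assumption): a padding mask [[1, 1, 0]] with tgt_len=2 is expanded
+# to an additive bias of shape (1, 1, 2, 3) containing 0.0 for real tokens and LARGE_NEGATIVE for the padded slot.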
+
+
+class TFBlenderbotLearnedPositionalEmbedding(keras.layers.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs):
+ super().__init__(num_embeddings, embedding_dim, **kwargs)
+
+ def call(
+ self, input_shape: tf.TensorShape, past_key_values_length: int = 0, position_ids: tf.Tensor | None = None
+ ):
+ """Input is expected to be of size [bsz x seqlen]."""
+ if position_ids is None:
+ seq_len = input_shape[1]
+ position_ids = tf.range(seq_len, delta=1, name="range")
+ position_ids += past_key_values_length
+
+ return super().call(tf.cast(position_ids, dtype=tf.int32))
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->Blenderbot
+class TFBlenderbotAttention(keras.layers.Layer):
+ """Multi-headed attention from "Attention Is All You Need"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.embed_dim = embed_dim
+
+ self.num_heads = num_heads
+ self.dropout = keras.layers.Dropout(dropout)
+ self.head_dim = embed_dim // num_heads
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+
+ self.k_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj")
+ self.q_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj")
+ self.v_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj")
+ self.out_proj = keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj")
+
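+    # Reshapes a (bsz, seq_len, embed_dim) projection to (bsz, num_heads, seq_len, head_dim) so attention can be
+    # computed per head.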
+ def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int):
+ return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3))
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ key_value_states: tf.Tensor | None = None,
+ past_key_value: Tuple[Tuple[tf.Tensor]] | None = None,
+ attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor | None]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+ bsz, tgt_len, embed_dim = shape_list(hidden_states)
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ if is_cross_attention and past_key_value is not None:
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = tf.concat([past_key_value[0], key_states], axis=2)
+ value_states = tf.concat([past_key_value[1], value_states], axis=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape)
+ key_states = tf.reshape(key_states, proj_shape)
+ value_states = tf.reshape(value_states, proj_shape)
+
+ src_len = shape_list(key_states)[1]
+ attn_weights = tf.matmul(query_states, key_states, transpose_b=True)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_weights),
+ [bsz * self.num_heads, tgt_len, src_len],
+ message=(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {shape_list(attn_weights)}"
+ ),
+ )
+
+ if attention_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attention_mask),
+ [bsz, 1, tgt_len, src_len],
+ message=(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {shape_list(attention_mask)}"
+ ),
+ )
+
+ attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype)
+ attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_weights = stable_softmax(attn_weights, axis=-1)
+
+ if layer_head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(layer_head_mask),
+ [self.num_heads],
+ message=(
+ f"Head mask for a single layer should be of size {(self.num_heads)}, but is"
+ f" {shape_list(layer_head_mask)}"
+ ),
+ )
+
+ attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape(
+ attn_weights, (bsz, self.num_heads, tgt_len, src_len)
+ )
+ attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len))
+
+ attn_probs = self.dropout(attn_weights, training=training)
+ attn_output = tf.matmul(attn_probs, value_states)
+
+ tf.debugging.assert_equal(
+ shape_list(attn_output),
+ [bsz * self.num_heads, tgt_len, self.head_dim],
+ message=(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {shape_list(attn_output)}"
+ ),
+ )
+
+ attn_output = tf.transpose(
+ tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3)
+ )
+ attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim))
+
+ attn_output = self.out_proj(attn_output)
+ attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len))
+
+ return attn_output, attn_weights, past_key_value
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartEncoderLayer with MBart->Blenderbot
+class TFBlenderbotEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlenderbotConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFBlenderbotAttention(
+ self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn"
+ )
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+ self.fc1 = keras.layers.Dense(config.encoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ layer_head_mask: tf.Tensor,
+ training: Optional[bool] = False,
+ ):
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`tf.Tensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ *(encoder_attention_heads,)*
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+ hidden_states, self_attn_weights, _ = self.self_attn(
+ hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask
+ )
+
+ tf.debugging.assert_equal(
+ shape_list(hidden_states),
+ shape_list(residual),
+ message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}",
+ )
+
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return hidden_states, self_attn_weights
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.encoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.mbart.modeling_tf_mbart.TFMBartDecoderLayer with MBart->Blenderbot
+class TFBlenderbotDecoderLayer(keras.layers.Layer):
+ def __init__(self, config: BlenderbotConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.embed_dim = config.d_model
+ self.self_attn = TFBlenderbotAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="self_attn",
+ is_decoder=True,
+ )
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.activation_fn = get_tf_activation(config.activation_function)
+ self.activation_dropout = keras.layers.Dropout(config.activation_dropout)
+
+ self.self_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm")
+ self.encoder_attn = TFBlenderbotAttention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ name="encoder_attn",
+ is_decoder=True,
+ )
+ self.encoder_attn_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm")
+ self.fc1 = keras.layers.Dense(config.decoder_ffn_dim, name="fc1")
+ self.fc2 = keras.layers.Dense(self.embed_dim, name="fc2")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm")
+ self.config = config
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor | None = None,
+ encoder_hidden_states: tf.Tensor | None = None,
+ encoder_attention_mask: tf.Tensor | None = None,
+ layer_head_mask: tf.Tensor | None = None,
+ cross_attn_layer_head_mask: tf.Tensor | None = None,
+ past_key_value: Tuple[tf.Tensor] | None = None,
+ training: Optional[bool] = False,
+ ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape *(batch, seq_len, embed_dim)*
+ attention_mask (`tf.Tensor`): attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ encoder_hidden_states (`tf.Tensor`):
+ cross attention input to the layer of shape *(batch, seq_len, embed_dim)*
+ encoder_attention_mask (`tf.Tensor`): encoder attention mask of size
+ *(batch, 1, tgt_len, src_len)* where padding elements are indicated by very large negative values.
+ layer_head_mask (`tf.Tensor`): mask for attention heads in a given layer of size
+ *(decoder_attention_heads,)*
+            cross_attn_layer_head_mask (`tf.Tensor`): mask for heads of the cross-attention module of size
+                *(decoder_attention_heads,)*
+ past_key_value (`Tuple(tf.Tensor)`): cached past key and value projection states
+ """
+ residual = hidden_states
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # Cross-Attention Block
+ cross_attn_present_key_value = None
+ cross_attn_weights = None
+ if encoder_hidden_states is not None:
+ residual = hidden_states
+ hidden_states = self.encoder_attn_layer_norm(hidden_states)
+
+ # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
+ cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
+ hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
+ hidden_states=hidden_states,
+ key_value_states=encoder_hidden_states,
+ attention_mask=encoder_attention_mask,
+ layer_head_mask=cross_attn_layer_head_mask,
+ past_key_value=cross_attn_past_key_value,
+ )
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ # add cross-attn to positions 3,4 of present_key_value tuple
+ present_key_value = present_key_value + cross_attn_present_key_value
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.activation_fn(self.fc1(hidden_states))
+ hidden_states = self.activation_dropout(hidden_states, training=training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = self.dropout(hidden_states, training=training)
+ hidden_states = residual + hidden_states
+
+ return (
+ hidden_states,
+ self_attn_weights,
+ cross_attn_weights,
+ present_key_value,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "self_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.self_attn_layer_norm.name):
+ self.self_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "encoder_attn", None) is not None:
+ with tf.name_scope(self.encoder_attn.name):
+ self.encoder_attn.build(None)
+ if getattr(self, "encoder_attn_layer_norm", None) is not None:
+ with tf.name_scope(self.encoder_attn_layer_norm.name):
+ self.encoder_attn_layer_norm.build([None, None, self.embed_dim])
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.embed_dim])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.config.decoder_ffn_dim])
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+class TFBlenderbotPreTrainedModel(TFPreTrainedModel):
+ config_class = BlenderbotConfig
+ base_model_prefix = "model"
+
+
+BLENDERBOT_START_DOCSTRING = r"""
+    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
+    heads etc.)
+
+    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
+    and behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
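+
+    A minimal sketch of the keyword-argument and dictionary formats (the checkpoint name and input sentence here
+    are illustrative assumptions, not part of the original documentation):
+
+    ```py
+    >>> from transformers import AutoTokenizer, TFBlenderbotModel
+
+    >>> tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+    >>> model = TFBlenderbotModel.from_pretrained("facebook/blenderbot-400M-distill")
+    >>> enc = tokenizer("My friends are cool but they eat too many carbs.", return_tensors="tf")
+
+    >>> # all inputs as keyword arguments (like PyTorch models)
+    >>> outputs = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])
+
+    >>> # or all inputs as a dict in the first positional argument
+    >>> outputs = model({"input_ids": enc["input_ids"], "attention_mask": enc["attention_mask"]})
+    ```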
+
+
+
+ Args:
+ config ([`BlenderbotConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BLENDERBOT_GENERATION_EXAMPLE = r"""
+    Conversation example:
+
+ ```py
+ >>> from transformers import AutoTokenizer, TFBlenderbotForConditionalGeneration
+
+ >>> mname = "facebook/blenderbot-400M-distill"
+ >>> model = TFBlenderbotForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+ >>> UTTERANCE = "My friends are cool but they eat too many carbs."
+ >>> print("Human: ", UTTERANCE)
+
+ >>> inputs = tokenizer([UTTERANCE], return_tensors="tf")
+ >>> reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
+
+ >>> REPLY = "I'm not sure"
+ >>> print("Human: ", REPLY)
+ >>> NEXT_UTTERANCE = (
+ ... "My friends are cool but they eat too many carbs. That's unfortunate. "
+ ... "Are they trying to lose weight or are they just trying to be healthier? "
+ ... " I'm not sure."
+ ... )
+ >>> inputs = tokenizer([NEXT_UTTERANCE], return_tensors="tf")
+ >>> next_reply_ids = model.generate(**inputs)
+ >>> print("Bot: ", tokenizer.batch_decode(next_reply_ids, skip_special_tokens=True)[0])
+ ```
+"""
+
+BLENDERBOT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ Blenderbot uses the `bos_token_id` as the starting token for `decoder_input_ids` generation. If
+ `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+            A mask that ignores pad tokens will be created by default, so setting this is not recommended for most
+            use cases.
+ decoder_position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+        encoder_outputs (`tf.FloatTensor`, *optional*):
+            Sequence of hidden states at the output of the last layer of the encoder, of shape `(batch_size,
+            sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
+        past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`):
+            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
+            decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+            `past_key_values`). Set to `False` during training and `True` during generation.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@keras_serializable
+class TFBlenderbotEncoder(keras.layers.Layer):
+ config_class = BlenderbotConfig
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
+ [`TFBlenderbotEncoderLayer`].
+
+ Args:
+ config: BlenderbotConfig
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.dropout = keras.layers.Dropout(config.dropout)
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = config.pad_token_id
+ self.max_source_positions = config.max_position_embeddings
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+
+ self.embed_tokens = embed_tokens
+ self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.layers = [TFBlenderbotEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ head_mask=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ """
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+            head_mask (`tf.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ embed_pos = self.embed_positions(input_shape)
+ hidden_states = inputs_embeds + embed_pos
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # check attention mask and invert
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(head_mask)[0],
+ len(self.layers),
+ message=(
+ f"The head_mask should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(head_mask)[0]}."
+ ),
+ )
+
+ # encoder layers
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = random.uniform(0, 1)
+ if training and (dropout_probability < self.layerdrop): # skip the layer
+ continue
+
+ hidden_states, attn = encoder_layer(
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ )
+
+ if output_attentions:
+ all_attentions += (attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFBlenderbotDecoder(keras.layers.Layer):
+ config_class = BlenderbotConfig
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`TFBlenderbotDecoderLayer`]
+
+ Args:
+ config: BlenderbotConfig
+ embed_tokens: output embedding
+ """
+
+ def __init__(self, config: BlenderbotConfig, embed_tokens: Optional[keras.layers.Embedding] = None, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.embed_tokens = embed_tokens
+ self.layerdrop = config.decoder_layerdrop
+ self.embed_positions = TFBlenderbotLearnedPositionalEmbedding(
+ config.max_position_embeddings,
+ config.d_model,
+ name="embed_positions",
+ )
+ self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
+ self.layers = [TFBlenderbotDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)]
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")
+
+ self.dropout = keras.layers.Dropout(config.dropout)
+
+ def get_embed_tokens(self):
+ return self.embed_tokens
+
+ def set_embed_tokens(self, embed_tokens):
+ self.embed_tokens = embed_tokens
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ inputs_embeds=None,
+ attention_mask=None,
+ position_ids=None,
+ encoder_hidden_states=None,
+ encoder_attention_mask=None,
+ head_mask=None,
+ cross_attn_head_mask=None,
+ past_key_values=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ r"""
+ Args:
+ input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each decoder input sequence tokens in the position embeddings. Selected in the
+ range `[0, config.max_position_embeddings - 1]`.
+ encoder_hidden_states (`tf.Tensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*):
+ Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
+ of the decoder.
+ encoder_attention_mask (`tf.Tensor` of shape `(batch_size, encoder_sequence_length)`, *optional*):
+ Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
+ selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`tf.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
+ decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those
+ that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of
+ all `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value
+ in the config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail. This argument can be used only in eager mode, in graph mode the value in the config
+ will be used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used
+ in eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+ """
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ past_key_values_length = shape_list(past_key_values[0][0])[2] if past_key_values is not None else 0
+
+ # embed positions
+ if position_ids is None:
+ positions = self.embed_positions(input_shape, past_key_values_length)
+ else:
+ positions = self.embed_positions(input_shape, position_ids=position_ids)
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.embed_tokens.input_dim)
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+
+ hidden_states = inputs_embeds
+
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ if input_shape[-1] > 1:
+ combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length)
+ else:
+ combined_attention_mask = _expand_mask(
+ tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1]
+ )
+
+ if attention_mask is not None:
+ combined_attention_mask = combined_attention_mask + _expand_mask(attention_mask, tgt_len=input_shape[-1])
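+        # combined_attention_mask now carries both the additive causal bias and, if provided, the padding bias
+        # derived from attention_mask.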
+
+ if encoder_hidden_states is not None and encoder_attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ encoder_attention_mask = _expand_mask(encoder_attention_mask, tgt_len=input_shape[-1])
+
+ hidden_states = hidden_states + positions
+ hidden_states = self.dropout(hidden_states, training=training)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if (output_attentions and encoder_hidden_states is not None) else None
+ present_key_values = () if use_cache else None
+
+ # check if head_mask and cross_attn_head_mask have a correct number of layers specified if desired
+ for attn_mask_name, attn_mask in [("head_mask", head_mask), ("cross_attn_head_mask", cross_attn_head_mask)]:
+ if attn_mask is not None:
+ tf.debugging.assert_equal(
+ shape_list(attn_mask)[0],
+ len(self.layers),
+ message=(
+ f"The {attn_mask_name} should be specified for {len(self.layers)} layers, but it is for"
+ f" {shape_list(attn_mask)[0]}."
+ ),
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ dropout_probability = random.uniform(0, 1)
+
+ if training and (dropout_probability < self.layerdrop):
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ hidden_states, layer_self_attn, layer_cross_attn, present_key_value = decoder_layer(
+ hidden_states,
+ attention_mask=combined_attention_mask,
+ encoder_hidden_states=encoder_hidden_states,
+ encoder_attention_mask=encoder_attention_mask,
+ layer_head_mask=head_mask[idx] if head_mask is not None else None,
+ cross_attn_layer_head_mask=cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None,
+ past_key_value=past_key_value,
+ )
+
+ if use_cache:
+ present_key_values += (present_key_value,)
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+
+ if encoder_hidden_states is not None:
+ all_cross_attns += (layer_cross_attn,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ if not return_dict:
+ return hidden_states, present_key_values, all_hidden_states, all_self_attns, all_cross_attns
+ else:
+ return TFBaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=present_key_values,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embed_positions", None) is not None:
+ with tf.name_scope(self.embed_positions.name):
+ self.embed_positions.build(None)
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.d_model])
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+@keras_serializable
+class TFBlenderbotMainLayer(keras.layers.Layer):
+ config_class = BlenderbotConfig
+
+ def __init__(self, config: BlenderbotConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.shared = keras.layers.Embedding(
+ input_dim=config.vocab_size,
+ output_dim=config.d_model,
+ embeddings_initializer=keras.initializers.TruncatedNormal(stddev=self.config.init_std),
+ name="model.shared",
+ )
+ # Additional attribute to specify the expected name scope of the layer (for loading/storing weights)
+ self.shared.load_weight_prefix = "model.shared"
+
+ self.encoder = TFBlenderbotEncoder(config, self.shared, name="encoder")
+ self.decoder = TFBlenderbotDecoder(config, self.shared, name="decoder")
+
+ def get_input_embeddings(self):
+ return self.shared
+
+ def set_input_embeddings(self, new_embeddings):
+ self.shared = new_embeddings
+ self.encoder.embed_tokens = self.shared
+ self.decoder.embed_tokens = self.shared
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ decoder_input_ids=None,
+ decoder_attention_mask=None,
+ decoder_position_ids=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values=None,
+ inputs_embeds=None,
+ decoder_inputs_embeds=None,
+ use_cache=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ **kwargs,
+ ):
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True
+ elif return_dict and not isinstance(encoder_outputs, TFBaseModelOutput):
+ encoder_outputs = TFBaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+ # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False
+ elif not return_dict and not isinstance(encoder_outputs, tuple):
+ encoder_outputs = encoder_outputs.to_tuple()
+
+ decoder_outputs = self.decoder(
+ decoder_input_ids,
+ attention_mask=decoder_attention_mask,
+ position_ids=decoder_position_ids,
+ encoder_hidden_states=encoder_outputs[0],
+ encoder_attention_mask=attention_mask,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ # The shared/tied weights expect to be in the model base namespace
+ # Adding "/" to the end (not the start!) of a tf.name_scope puts it in the root namespace rather than
+ # the current one.
+ with tf.name_scope(self.shared.load_weight_prefix + "/" + self.shared.name + "/"):
+ self.shared.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "decoder", None) is not None:
+ with tf.name_scope(self.decoder.name):
+ self.decoder.build(None)
+
+
+@add_start_docstrings(
+ "The bare BLENDERBOT Model outputting raw hidden-states without any specific head on top.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class TFBlenderbotModel(TFBlenderbotPreTrainedModel):
+ def __init__(self, config: BlenderbotConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.model = TFBlenderbotMainLayer(config, name="model")
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ from ..blenderbot_small import TFBlenderbotSmallModel
+
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')`"
+ " instead.",
+ FutureWarning,
+ )
+ return TFBlenderbotSmallModel.from_pretrained(pretrained_model_name_or_path)
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSeq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: List[tf.Tensor] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: Optional[bool] = False,
+ **kwargs,
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqModelOutput]:
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ encoder_outputs=encoder_outputs,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqModelOutput(
+ last_hidden_state=output.last_hidden_state,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+
+
+# Copied from transformers.models.bart.modeling_tf_bart.BiasLayer
+class BiasLayer(keras.layers.Layer):
+ """
+ Bias as a layer. It is used for serialization purposes: `keras.Model.save_weights` stores on a per-layer basis,
+ so all weights have to be registered in a layer.
+ """
+
+ def __init__(self, shape, initializer, trainable, name, **kwargs):
+ super().__init__(name=name, **kwargs)
+ # Note: the name of this variable will NOT be scoped when serialized, i.e. it will not be in the format of
+ # "outer_layer/inner_layer/.../name:0". Instead, it will be "name:0". For further details, see:
+ # https://github.com/huggingface/transformers/pull/18833#issuecomment-1233090214
+ self.bias = self.add_weight(name=name, shape=shape, initializer=initializer, trainable=trainable)
+
+ def call(self, x):
+ return x + self.bias
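+
+ # Illustrative sketch (not part of the library): because the bias lives inside a Layer,
+ # `keras.Model.save_weights()` serializes it alongside every other layer weight, e.g.
+ #   bias_layer = BiasLayer(name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False)
+ #   logits = bias_layer(logits)  # adds the stored bias to the LM logits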
+
+
+@add_start_docstrings(
+ "The BLENDERBOT Model with a language modeling head. Can be used for summarization.",
+ BLENDERBOT_START_DOCSTRING,
+)
+class TFBlenderbotForConditionalGeneration(TFBlenderbotPreTrainedModel, TFCausalLanguageModelingLoss):
+ _keys_to_ignore_on_load_unexpected = [
+ r"model.encoder.embed_tokens.weight",
+ r"model.decoder.embed_tokens.weight",
+ ]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.model = TFBlenderbotMainLayer(config, name="model")
+ self.use_cache = config.use_cache
+ # final_logits_bias is registered as a buffer in pytorch, so it is not trainable here, for the sake of consistency.
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
+ )
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_output_embeddings(self):
+ return self.get_input_embeddings()
+
+ def set_output_embeddings(self, value):
+ self.set_input_embeddings(value)
+
+ def get_bias(self):
+ return {"final_logits_bias": self.bias_layer.bias}
+
+ def set_bias(self, value):
+ # Replaces the existing layers containing bias for correct (de)serialization.
+ vocab_size = value["final_logits_bias"].shape[-1]
+ self.bias_layer = BiasLayer(
+ name="final_logits_bias", shape=[1, vocab_size], initializer="zeros", trainable=False
+ )
+ self.bias_layer.bias.assign(value["final_logits_bias"])
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
+ if pretrained_model_name_or_path == "facebook/blenderbot-90M":
+ from ..blenderbot_small import TFBlenderbotSmallForConditionalGeneration
+
+ warnings.warn(
+ "The checkpoint `facebook/blenderbot-90M` is deprecated. In the future, please use the identical"
+ " checkpoint `facebook/small_blenderbot-90M` with"
+ " `TFBlenderbotSmallForConditionalGeneration.from_pretrained('facebook/small_blenderbot-90M')`"
+ " instead.",
+ FutureWarning,
+ )
+ return TFBlenderbotSmallForConditionalGeneration.from_pretrained(pretrained_model_name_or_path)
+
+ return super().from_pretrained(pretrained_model_name_or_path, *model_args, **kwargs)
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(BLENDERBOT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(BLENDERBOT_GENERATION_EXAMPLE)
+ def call(
+ self,
+ input_ids: tf.Tensor | None = None,
+ attention_mask: tf.Tensor | None = None,
+ decoder_input_ids: tf.Tensor | None = None,
+ decoder_attention_mask: tf.Tensor | None = None,
+ decoder_position_ids: tf.Tensor | None = None,
+ head_mask: tf.Tensor | None = None,
+ decoder_head_mask: tf.Tensor | None = None,
+ cross_attn_head_mask: tf.Tensor | None = None,
+ encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None,
+ past_key_values: List[tf.Tensor] | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ decoder_inputs_embeds: tf.Tensor | None = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: Optional[bool] = False,
+ ) -> Union[Tuple[tf.Tensor], TFSeq2SeqLMOutput]:
+ r"""
+ labels (`tf.tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ if labels is not None:
+ labels = tf.where(
+ labels == self.config.pad_token_id,
+ tf.cast(tf.fill(shape_list(labels), -100), labels.dtype),
+ labels,
+ )
+ use_cache = False
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ decoder_input_ids = shift_tokens_right(
+ labels, self.config.pad_token_id, self.config.decoder_start_token_id
+ )
+
+ outputs = self.model(
+ input_ids,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ decoder_position_ids=decoder_position_ids,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ lm_logits = tf.matmul(outputs[0], self.model.shared.weights, transpose_b=True)
+ lm_logits = self.bias_layer(lm_logits)
+ masked_lm_loss = None if labels is None else self.hf_compute_loss(labels, lm_logits)
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+ return TFSeq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values, # index 1 of d outputs
+ decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs
+ decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs
+ cross_attentions=outputs.cross_attentions, # index 4 of d outputs
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs
+ encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out
+ encoder_attentions=outputs.encoder_attentions, # 2 of e out
+ )
+
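+ # Usage sketch (illustrative; assumes the public "facebook/blenderbot-400M-distill" checkpoint):
+ #   tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+ #   model = TFBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
+ #   inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
+ #   labels = tokenizer(["That is unfortunate."], return_tensors="tf").input_ids
+ #   loss = model(**inputs, labels=labels).loss  # labels equal to pad_token_id are masked out of the loss
+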
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output
+ def serving_output(self, output):
+ pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None
+ dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None
+ dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None
+ cross_attns = tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions else None
+ enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None
+ enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None
+
+ return TFSeq2SeqLMOutput(
+ logits=output.logits,
+ past_key_values=pkv,
+ decoder_hidden_states=dec_hs,
+ decoder_attentions=dec_attns,
+ cross_attentions=cross_attns,
+ encoder_last_hidden_state=output.encoder_last_hidden_state,
+ encoder_hidden_states=enc_hs,
+ encoder_attentions=enc_attns,
+ )
+
+ # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ decoder_attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ # cut decoder_input_ids if past_key_values is used
+ if past_key_values is not None:
+ decoder_input_ids = decoder_input_ids[:, -1:]
+
+ if decoder_attention_mask is not None: # xla
+ decoder_position_ids = tf.math.cumsum(decoder_attention_mask, axis=-1, exclusive=True)[:, -1:]
+ elif past_key_values is not None: # no xla + past_key_values
+ decoder_position_ids = past_key_values[0][0].shape[2]
+ else: # no xla + no past_key_values
+ decoder_position_ids = tf.range(decoder_input_ids.shape[1])
+
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "decoder_attention_mask": decoder_attention_mask,
+ "decoder_position_ids": decoder_position_ids,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
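+ # Illustrative note: when generating with a cache only the last decoder token is fed back in, and
+ # `decoder_position_ids` comes either from the cumulative attention mask (XLA), the length of
+ # `past_key_values` (eager with cache), or a simple range (no cache), matching the three branches above.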
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "model", None) is not None:
+ with tf.name_scope(self.model.name):
+ self.model.build(None)
+ if getattr(self, "bias_layer", None) is not None:
+ with tf.name_scope(self.bias_layer.name):
+ self.bias_layer.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py
new file mode 100644
index 0000000000000000000000000000000000000000..b812f84b7d2d458c63df970ed6a8f215bbd5ce54
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot.py
@@ -0,0 +1,427 @@
+# coding=utf-8
+# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization class for Blenderbot."""
+
+import json
+import os
+from functools import lru_cache
+from typing import List, Optional, Tuple
+
+import regex as re
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+ "tokenizer_config_file": "tokenizer_config.json",
+}
+
+
+@lru_cache()
+# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
+def bytes_to_unicode():
+ """
+ Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+ characters that the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
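+
+ # For illustration (not part of the library): printable bytes map to themselves, while bytes the BPE code
+ # avoids are shifted past 255, e.g. bytes_to_unicode()[ord("!")] == "!" and bytes_to_unicode()[ord(" ")] == "Ġ".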
+
+
+# Copied from transformers.models.roberta.tokenization_roberta.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
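+
+ # For illustration (not part of the library):
+ # get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}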
+
+
+class BlenderbotTokenizer(PreTrainedTokenizer):
+ """
+ Constructs a Blenderbot tokenizer, derived from the GPT-2 tokenizer, using byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+ ```python
+ >>> from transformers import BlenderbotTokenizer
+
+ >>> tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")
+ >>> tokenizer.add_prefix_space = False
+ >>> tokenizer("Hello world")["input_ids"]
+ [47, 921, 86, 1085, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [6950, 1085, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+ <Tip>
+
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+ </Tip>
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
+ other word. (The Blenderbot tokenizer detects the beginning of words by the preceding space.)
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+
+ # Mask token behave like a normal word, i.e. include the space before it
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+
+ # these special tokens are not part of the vocab.json, let's add them in the correct order
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ super().__init__(
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ **kwargs,
+ )
+
+ @property
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def vocab_size(self):
+ return len(self.encoder)
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_vocab with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def get_vocab(self):
+ vocab = dict(self.encoder).copy()
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.bpe with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
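+ # Illustrative note (merge ranks are hypothetical): for the token "hello", the loop above repeatedly merges the
+ # lowest-ranked adjacent pair, e.g. ("h", "e") -> "he" and then ("l", "l") -> "ll", until no remaining pair has
+ # an entry in `self.bpe_ranks`; the space-joined result is cached per token.
+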
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._tokenize with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
+ return bpe_tokens
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_token_to_id with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer._convert_id_to_token with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.convert_tokens_to_string with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.get_special_tokens_mask with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.prepare_for_tokenization with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
+ add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
+ if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
+ text = " " + text
+ return (text, kwargs)
+
+ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Blenderbot sequence has the following format:
+ - single sequence: ` X </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Will be ignored
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ return token_ids_0 + [self.eos_token_id]
+
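+ # For illustration: with token_ids_0 == [47, 921, 86], this returns [47, 921, 86] + [self.eos_token_id];
+ # any `token_ids_1` argument is ignored.
+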
+ @property
+ def default_chat_template(self):
+ """
+ A very simple chat template that just adds whitespace between messages.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return (
+ "{% for message in messages %}"
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
+ "{{ message['content'] }}"
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
+ "{% endfor %}"
+ "{{ eos_token }}"
+ )
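+
+ # Rendering sketch (illustrative, assuming eos_token "</s>"): the template turns
+ #   [{"role": "user", "content": "Hi"}, {"role": "assistant", "content": "Hello!"}]
+ # into the single string " Hi Hello!</s>"; user turns get a leading space and turns are space-separated.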
diff --git a/venv/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..879173282da1e236c6e207012f0f4babe7f79c5b
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/blenderbot/tokenization_blenderbot_fast.py
@@ -0,0 +1,309 @@
+# coding=utf-8
+# Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization class for Blenderbot."""
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import pre_tokenizers, processors
+
+from ...tokenization_utils_base import AddedToken, BatchEncoding
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_blenderbot import BlenderbotTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+ "tokenizer_config_file": "tokenizer_config.json",
+}
+
+
+class BlenderbotTokenizerFast(PreTrainedTokenizerFast):
+ """
+ Construct a "fast" Blenderbot tokenizer (backed by HuggingFace's *tokenizers* library), derived from the GPT-2
+ tokenizer, using byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece), so a word will
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
+
+ ```python
+ >>> from transformers import BlenderbotTokenizerFast
+
+ >>> tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
+ >>> tokenizer("Hello world")["input_ids"]
+ [6950, 1085, 2]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [6950, 1085, 2]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+ <Tip>
+
+ When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
+
+ </Tip>
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+ Whether or not to add an initial space to the input. This allows treating the leading word just like any
+ other word. (The Blenderbot tokenizer detects the beginning of words by the preceding space.)
+ trim_offsets (`bool`, *optional*, defaults to `True`):
+ Whether the post processing step should trim offsets to avoid including whitespaces.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+ slow_tokenizer_class = BlenderbotTokenizer
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.__init__ with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def __init__(
+ self,
+ vocab_file=None,
+ merges_file=None,
+ tokenizer_file=None,
+ errors="replace",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="",
+ pad_token="",
+ mask_token="",
+ add_prefix_space=False,
+ trim_offsets=True,
+ **kwargs,
+ ):
+ mask_token = (
+ AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
+ if isinstance(mask_token, str)
+ else mask_token
+ )
+ super().__init__(
+ vocab_file,
+ merges_file,
+ tokenizer_file=tokenizer_file,
+ errors=errors,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ add_prefix_space=add_prefix_space,
+ trim_offsets=trim_offsets,
+ **kwargs,
+ )
+
+ pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
+ if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
+ pre_tok_state["add_prefix_space"] = add_prefix_space
+ self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
+
+ self.add_prefix_space = add_prefix_space
+
+ tokenizer_component = "post_processor"
+ tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
+ if tokenizer_component_instance:
+ state = json.loads(tokenizer_component_instance.__getstate__())
+
+ # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
+ if "sep" in state:
+ state["sep"] = tuple(state["sep"])
+ if "cls" in state:
+ state["cls"] = tuple(state["cls"])
+
+ changes_to_apply = False
+
+ if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
+ state["add_prefix_space"] = add_prefix_space
+ changes_to_apply = True
+
+ if state.get("trim_offsets", trim_offsets) != trim_offsets:
+ state["trim_offsets"] = trim_offsets
+ changes_to_apply = True
+
+ if changes_to_apply:
+ component_class = getattr(processors, state.pop("type"))
+ new_value = component_class(**state)
+ setattr(self.backend_tokenizer, tokenizer_component, new_value)
+
+ @property
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+
+ Blenderbot tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
+ comprise the space before the *<mask>*.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @mask_token.setter
+ def mask_token(self, value):
+ """
+ Overriding the default behavior of the mask token to have it eat the space before it.
+
+ This is needed to preserve backward compatibility with all the previously used models based on Roberta.
+ """
+ # Mask token behave like a normal word, i.e. include the space before it
+ # So we set lstrip to True
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
+ self._mask_token = value
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._batch_encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._batch_encode_plus(*args, **kwargs)
+
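+ # For illustration: pre-tokenized input such as tokenizer(["Hello", "world"], is_split_into_words=True)
+ # requires the tokenizer to have been instantiated with add_prefix_space=True, otherwise the assertion above fires.
+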
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast._encode_plus with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
+ is_split_into_words = kwargs.get("is_split_into_words", False)
+
+ assert self.add_prefix_space or not is_split_into_words, (
+ f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
+ "to use it with pretokenized inputs."
+ )
+
+ return super()._encode_plus(*args, **kwargs)
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.save_vocabulary with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
+
+ # Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.create_token_type_ids_from_sequences with Roberta->Blenderbot, RoBERTa->Blenderbot
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. Blenderbot does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A Blenderbot sequence has the following format:
+ - single sequence: ` X </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Will be ignored
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ return token_ids_0 + [self.eos_token_id]
+
+ @property
+ # Copied from transformers.models.blenderbot.tokenization_blenderbot.BlenderbotTokenizer.default_chat_template
+ def default_chat_template(self):
+ """
+ A very simple chat template that just adds whitespace between messages.
+ """
+ logger.warning_once(
+ "\nNo chat template is defined for this tokenizer - using the default template "
+ f"for the {self.__class__.__name__} class. If the default is not appropriate for "
+ "your model, please set `tokenizer.chat_template` to an appropriate template. "
+ "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n"
+ )
+ return (
+ "{% for message in messages %}"
+ "{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}"
+ "{{ message['content'] }}"
+ "{% if not loop.last %}{{ ' ' }}{% endif %}"
+ "{% endfor %}"
+ "{{ eos_token }}"
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9fc2c84e701143464b08ac9fd9b3e6c77a169a7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e5f0f5ba7aeb047fffa8cf28b6f9cae5a75dbf0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/configuration_clvp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/convert_clvp_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/convert_clvp_to_hf.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f7dc0a1c7251db8ad0dcb81162401979c9e081da
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/convert_clvp_to_hf.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21d3aff2d3ac12411735259d43392bb26f79e60b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/feature_extraction_clvp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1f83e38a56ff14a599339d67bdbe0cb487e0b66c
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/modeling_clvp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/number_normalizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/number_normalizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dfc052ac3eb253e8716d3e167d860937574931cb
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/number_normalizer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d81254bceafb565ad6be8858745de302455c9be7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/processing_clvp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5052bf4e3219c69af0ca48802227b5e3956bfffc
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/clvp/__pycache__/tokenization_clvp.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/convert_clvp_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/clvp/convert_clvp_to_hf.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ae6fd4254978f28095ae312c98b1ef6f21fa315
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clvp/convert_clvp_to_hf.py
@@ -0,0 +1,234 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Weights conversion script for CLVP
+"""
+
+import argparse
+import os
+
+import torch
+from huggingface_hub import hf_hub_download
+
+from transformers import ClvpConfig, ClvpModelForConditionalGeneration
+
+
+_MODELS = {
+ "clvp": "https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/clvp2.pth",
+ "decoder": "https://huggingface.co/jbetker/tortoise-tts-v2/blob/main/.models/autoregressive.pth",
+}
+
+dim = 1024
+sub_dim = dim // 16
+
+CLVP_ENCODERS_MAPPING = {
+ "text_transformer.transformer.attn_layers": "text_encoder_model",
+ "speech_transformer.transformer.attn_layers": "speech_encoder_model",
+ "text_transformer.transformer.norm": "text_encoder_model.final_layer_norm",
+ "speech_transformer.transformer.norm": "speech_encoder_model.final_layer_norm",
+ "to_text_latent": "text_encoder_model.projection",
+ "to_speech_latent": "speech_encoder_model.projection",
+ "text_emb": "text_encoder_model.token_embedding",
+ "speech_emb": "speech_encoder_model.token_embedding",
+ "1.wrap.net.0": "mlp.fc1",
+ "1.wrap.net.3": "mlp.fc2",
+ "1.wrap": "self_attn",
+ "to_out": "out_proj",
+ "to_q": "q_proj",
+ "to_k": "k_proj",
+ "to_v": "v_proj",
+ "temperature": "logit_scale",
+}
+
+CLVP_DECODER_MAPPING = {
+ "conditioning_encoder.init": "conditioning_encoder.mel_conv",
+ "conditioning_encoder.attn": "conditioning_encoder.mel_attn_blocks",
+ "mel_attn_blocks": "group_norms",
+ ".norm.weight": ".weight",
+ ".norm.bias": ".bias",
+ "text_embedding": "conditioning_encoder.text_token_embedding",
+ "text_pos_embedding.emb": "conditioning_encoder.text_position_embedding",
+ "final_norm": "speech_decoder_model.final_norm",
+ "mel_head": "speech_decoder_model.lm_head",
+ "gpt.ln_f": "speech_decoder_model.model.decoder.layer_norm",
+ "mel_embedding": "speech_decoder_model.model.decoder.input_embeds_layer",
+ "mel_pos_embedding.emb": "speech_decoder_model.model.decoder.position_embeds_layer",
+ "gpt.h": "speech_decoder_model.model.decoder.layers",
+ "ln_1": "input_layernorm",
+ "ln_2": "post_attention_layernorm",
+}
+
+
+def update_index(present_index):
+ if present_index % 2 == 0:
+ return int(present_index / 2)
+ else:
+ return int((present_index - 1) / 2)
+
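+ # For illustration: update_index folds index pairs (2i, 2i + 1) into i, e.g. update_index(4) == update_index(5) == 2,
+ # presumably because attention and feed-forward sub-layers are stored as separate entries in the original checkpoint.
+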
+
+def convert_encoder_weights(original_weights):
+ converted_weights = {}
+ original_weights_keys = sorted(original_weights.keys())
+ for original_key in original_weights_keys:
+ updated_key = original_key
+ # for input_rmsnorm.weight and post_attention_rmsnorm.weight
+ if "0.0.g" in updated_key:
+ present_index = updated_key.split(".")[4]
+ if int(present_index) % 2 == 0:
+ updated_key = updated_key.replace("0.0.g", "input_rmsnorm.weight")
+ else:
+ updated_key = updated_key.replace("0.0.g", "post_attention_rmsnorm.weight")
+
+ if "transformer.attn_layers.layers" in updated_key:
+ present_index = updated_key.split(".")[4]
+ updated_index = update_index(int(present_index))
+ updated_key = updated_key.replace(
+ f"transformer.attn_layers.layers.{present_index}", f"transformer.attn_layers.layers.{updated_index}"
+ )
+
+ for k, v in CLVP_ENCODERS_MAPPING.items():
+ if k in updated_key:
+ updated_key = updated_key.replace(k, v)
+
+ converted_weights[updated_key] = original_weights.pop(original_key)
+
+ return converted_weights
+
+
+def convert_decoder_weights(original_weights):
+ converted_weights = {}
+ original_weights_keys = sorted(original_weights.keys())
+ for original_key in original_weights_keys:
+ updated_key = original_key
+ if len(updated_key.split(".")) > 3:
+ index, attr = updated_key.split(".")[2], updated_key.split(".")[-1]
+
+ # for decoder attention
+ if "attn.c_attn" in updated_key:
+ if attr == "weight":
+ slice1, slice2, slice3 = original_weights[updated_key].squeeze(-1).T.split(split_size=dim, dim=0)
+ else:
+ slice1, slice2, slice3 = original_weights[updated_key].split(split_size=dim, dim=0)
+ converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.q_proj.{attr}"] = slice1
+ converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.k_proj.{attr}"] = slice2
+ converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.v_proj.{attr}"] = slice3
+ continue
+
+ if "attn.c_proj" in updated_key:
+ converted_weights[f"speech_decoder_model.model.decoder.layers.{index}.attn.out_proj.{attr}"] = (
+ original_weights[updated_key].squeeze(-1).T
+ )
+ continue
+
+ if "attn.bias" in updated_key or "attn.masked_bias" in updated_key or "text_head" in updated_key:
+ original_weights.pop(updated_key)
+ continue
+
+ # conditional encoder attention
+ if "qkv" in updated_key:
+ if attr == "weight":
+ slice1, slice2, slice3 = original_weights[updated_key].squeeze(-1).split(split_size=dim, dim=0)
+ else:
+ slice1, slice2, slice3 = original_weights[updated_key].split(split_size=dim, dim=0)
+
+ indices = torch.arange(dim)
+ index1, index2, index3 = (
+ indices.unfold(0, sub_dim, sub_dim * 3).flatten(),
+ indices[sub_dim:].unfold(0, sub_dim, sub_dim * 3).flatten(),
+ indices[2 * sub_dim :].unfold(0, sub_dim, sub_dim * 3).flatten(),
+ )
+
+ converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.q_proj.{attr}"] = torch.concatenate(
+ [slice1[index1], slice2[index3], slice3[index2]],
+ axis=0,
+ )
+ converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.k_proj.{attr}"] = torch.concatenate(
+ [slice1[index2], slice2[index1], slice3[index3]],
+ axis=0,
+ )
+ converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.v_proj.{attr}"] = torch.concatenate(
+ [slice1[index3], slice2[index2], slice3[index1]],
+ axis=0,
+ )
+ continue
+
+ if "proj_out" in updated_key:
+ converted_weights[f"conditioning_encoder.mel_attn_blocks.{index}.out_proj.{attr}"] = original_weights[
+ updated_key
+ ].squeeze(-1)
+ continue
+
+ for k, v in CLVP_DECODER_MAPPING.items():
+ if k in updated_key:
+ updated_key = updated_key.replace(k, v)
+
+ converted_weights[updated_key] = original_weights.pop(original_key)
+
+ return converted_weights
+
+
+def _download(url: str, root: str):
+ repo_id = f"{url.split('/')[3]}/{url.split('/')[4]}"
+ filename = f"{url.split('/')[-2]}/{url.split('/')[-1]}"
+ hf_hub_download(
+ repo_id=repo_id,
+ filename=filename,
+ force_filename=root,
+ local_dir_use_symlinks=False,
+ )
+
+
+def convert_clvp_weights(checkpoint_path, pytorch_dump_folder_path):
+ converted_checkpoint = {}
+
+ for each_model_name, each_model_url in _MODELS.items():
+ each_model_path = os.path.join(checkpoint_path, each_model_url.split("/")[-1])
+ if not os.path.exists(each_model_path):
+ print(f"\n{each_model_name} was not found! Downloading it to {each_model_path}")
+ _download(url=each_model_url, root=each_model_path)
+
+ if each_model_name == "clvp":
+ clvp_checkpoint = torch.load(each_model_path, map_location="cpu")
+ else:
+ decoder_checkpoint = torch.load(each_model_path, map_location="cpu")
+
+ # Converting the weights
+ converted_checkpoint.update(**convert_encoder_weights(clvp_checkpoint))
+ converted_checkpoint.update(**convert_decoder_weights(decoder_checkpoint))
+
+ config = ClvpConfig.from_pretrained("susnato/clvp_dev")
+ model = ClvpModelForConditionalGeneration(config)
+
+ model.load_state_dict(converted_checkpoint, strict=True)
+ model.save_pretrained(pytorch_dump_folder_path)
+ print(f"Model saved at {pytorch_dump_folder_path}!")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--checkpoint_path", type=str, help="Path to the folder of downloaded checkpoints. (Please enter full path)"
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path",
+ default=None,
+ type=str,
+ help="Path to the output PyTorch model. (Please enter full path)",
+ )
+ args = parser.parse_args()
+
+ convert_clvp_weights(args.checkpoint_path, args.pytorch_dump_folder_path)
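+
+ # Example invocation (illustrative; paths are placeholders):
+ #   python convert_clvp_to_hf.py --checkpoint_path /path/to/clvp_checkpoints \
+ #       --pytorch_dump_folder_path /path/to/converted_model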
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py b/venv/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..69741a03f575b8b5900be4b83e9a59e33536789e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clvp/feature_extraction_clvp.py
@@ -0,0 +1,238 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Feature extractor class for CLVP
+"""
+
+from typing import List, Optional, Union
+
+import numpy as np
+
+from ...audio_utils import mel_filter_bank, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class ClvpFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a CLVP feature extractor.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+ This class extracts log-mel-spectrogram features from raw speech using a custom numpy implementation of the `Short
+ Time Fourier Transform` which should match pytorch's `torch.stft` equivalent.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 80):
+ The feature dimension of the extracted features.
+ sampling_rate (`int`, *optional*, defaults to 22050):
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+ default_audio_length (`int`, *optional*, defaults to 6):
+ The default length of raw audio in seconds. If `max_length` is not set during `__call__` then it will
+ automatically be set to default_audio_length * `self.sampling_rate`.
+ hop_length (`int`, *optional*, defaults to 256):
+ Length of the overlapping windows for the STFT used to obtain the Mel Frequency coefficients.
+ chunk_length (`int`, *optional*, defaults to 30):
+ The maximum number of chunks of `sampling_rate` samples used to trim and pad longer or shorter audio
+ sequences.
+ n_fft (`int`, *optional*, defaults to 1024):
+ Size of the Fourier transform.
+ padding_value (`float`, *optional*, defaults to 0.0):
+ Padding value used to pad the audio. Should correspond to silences.
+ mel_norms (`list` of length `feature_size`, *optional*):
+ If `mel_norms` is provided then it will be used to normalize the log-mel spectrograms along each
+ mel-filter.
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
+ Whether to return the attention mask. If left to the default, it will return the attention mask.
+
+ [What are attention masks?](../glossary#attention-mask)
+ """
+
+ model_input_names = ["input_features", "attention_mask"]
+
+ def __init__(
+ self,
+ feature_size=80,
+ sampling_rate=22050,
+ default_audio_length=6,
+ hop_length=256,
+ chunk_length=30,
+ n_fft=1024,
+ padding_value=0.0,
+ mel_norms=None,
+ return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask
+ **kwargs,
+ ):
+ super().__init__(
+ feature_size=feature_size,
+ sampling_rate=sampling_rate,
+ padding_value=padding_value,
+ return_attention_mask=return_attention_mask,
+ **kwargs,
+ )
+ self.n_fft = n_fft
+ self.hop_length = hop_length
+ self.chunk_length = chunk_length
+ self.n_samples = chunk_length * sampling_rate
+ self.nb_max_frames = self.n_samples // hop_length
+ self.sampling_rate = sampling_rate
+ self.default_audio_length = default_audio_length
+ self.mel_norms = mel_norms
+ self.mel_filters = mel_filter_bank(
+ num_frequency_bins=1 + (n_fft // 2),
+ num_mel_filters=feature_size,
+ min_frequency=0.0,
+ max_frequency=8000.0,
+ sampling_rate=sampling_rate,
+ norm="slaney",
+ mel_scale="htk",
+ )
+
+ def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
+ """
+        This method first computes the log-mel spectrogram of the provided audio and then applies normalization along
+        each mel-filterbank, if `mel_norms` is provided.
+ """
+ log_spec = spectrogram(
+ waveform,
+ window_function(self.n_fft, "hann"),
+ frame_length=self.n_fft,
+ hop_length=self.hop_length,
+ power=2.0,
+ mel_filters=self.mel_filters,
+ log_mel=None,
+ )
+
+ log_spec = np.log(np.clip(log_spec, a_min=1e-5, a_max=None))
+
+ if self.mel_norms is not None:
+ log_spec = log_spec / np.array(self.mel_norms)[:, None]
+
+ return log_spec
+
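+    # Illustrative sketch (not part of the original file): with the default settings
+    # (n_fft=1024, hop_length=256, feature_size=80) the method above returns a log-mel
+    # spectrogram of shape (80, n_frames), where n_frames is roughly len(waveform) // 256.
+    #
+    #     import numpy as np
+    #
+    #     extractor = ClvpFeatureExtractor()
+    #     waveform = np.zeros(6 * 22050, dtype=np.float32)  # 6 seconds of silence
+    #     log_mel = extractor._np_extract_fbank_features(waveform)
+    #     print(log_mel.shape)  # (80, n_frames)
+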
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ sampling_rate: Optional[int] = None,
+ truncation: bool = True,
+ pad_to_multiple_of: Optional[int] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ return_attention_mask: Optional[bool] = True,
+ padding: Optional[str] = "max_length",
+ max_length: Optional[int] = None,
+ **kwargs,
+ ) -> BatchFeature:
+ """
+        `ClvpFeatureExtractor` is used to extract various voice-specific properties such as the pitch and tone of the
+        voice, speaking speed, and even speaking defects like a lisp or stuttering from a sample voice (`raw_speech`).
+
+ First the voice is padded or truncated in a way such that it becomes a waveform of `self.default_audio_length`
+ seconds long and then the log-mel spectrogram is extracted from it.
+
+ Args:
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
+ stereo, i.e. single float per timestep.
+ sampling_rate (`int`, *optional*):
+                The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+                `sampling_rate` at the forward call to prevent silent errors and to allow the automatic speech
+                recognition pipeline to work correctly.
+            truncation (`bool`, *optional*, defaults to `True`):
+ Activates truncation to cut input sequences longer than *max_length* to *max_length*.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_attention_mask (`bool`, *optional*, defaults to `True`):
+ Whether to return the attention mask. If left to the default, it will return the attention mask.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of list of python integers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ padding_value (`float`, defaults to 0.0):
+ The value that is used to fill the padding values / vectors.
+ max_length (`int`, *optional*):
+ The maximum input length of the inputs.
+ """
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
+ f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
+ f" was sampled with {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [np.asarray([raw_speech]).T]
+
+ batched_speech = BatchFeature({"input_features": raw_speech})
+
+ max_length = self.default_audio_length * self.sampling_rate if max_length is None else max_length
+
+ padded_inputs = self.pad(
+ batched_speech,
+ padding=padding,
+ max_length=max_length,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+        # bring the padded batch into array format and transpose so we can iterate over the individual waveforms
+ input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
+
+ input_features = [
+ self._np_extract_fbank_features(waveform).astype(np.float32) for waveform in input_features[0]
+ ]
+
+ if isinstance(input_features[0], List):
+ padded_inputs["input_features"] = [np.asarray(feature) for feature in input_features]
+ else:
+ padded_inputs["input_features"] = input_features
+
+ return padded_inputs.convert_to_tensors(return_tensors)
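+
+
+# Illustrative usage sketch (not part of the original file). The checkpoint name
+# "susnato/clvp_dev" is only an assumption borrowed from the modeling file's doc
+# checkpoint; any CLVP checkpoint with an 80-bin, 22050 Hz feature extractor config
+# behaves the same way.
+#
+#     import numpy as np
+#     from transformers import ClvpFeatureExtractor
+#
+#     feature_extractor = ClvpFeatureExtractor.from_pretrained("susnato/clvp_dev")
+#     audio = np.random.randn(3 * 22050).astype(np.float32)  # 3 s of mono audio
+#     inputs = feature_extractor(audio, sampling_rate=22050, return_tensors="pt")
+#     # the audio is padded to `default_audio_length` (6 s) before the STFT, so
+#     # `input_features` has shape (batch_size=1, feature_size=80, n_frames)
+#     print(inputs["input_features"].shape)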
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/modeling_clvp.py b/venv/lib/python3.10/site-packages/transformers/models/clvp/modeling_clvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..654989dcbd603967254d08cdad5678e622c976e1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clvp/modeling_clvp.py
@@ -0,0 +1,2022 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+""" PyTorch CLVP model."""
+
+
+import copy
+import math
+from dataclasses import dataclass
+from typing import Dict, Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ...activations import ACT2FN
+from ...generation import GenerationConfig
+from ...modeling_attn_mask_utils import _prepare_4d_attention_mask, _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ BaseModelOutputWithPooling,
+ CausalLMOutputWithCrossAttentions,
+)
+from ...modeling_utils import PreTrainedModel, SequenceSummary
+from ...pytorch_utils import Conv1D
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_clvp import (
+ ClvpConfig,
+ ClvpDecoderConfig,
+ ClvpEncoderConfig,
+)
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "susnato/clvp_dev"
+
+
+from ..deprecated._archive_maps import CLVP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.clip.modeling_clip.contrastive_loss
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
+
+
+# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->clvp, image_loss->speech_loss
+def clvp_loss(similarity: torch.Tensor) -> torch.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ speech_loss = contrastive_loss(similarity.t())
+ return (caption_loss + speech_loss) / 2.0
+
+
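+# Illustrative sketch (not part of the original file): for a well-aligned batch the
+# similarity matrix is largest on its diagonal, and `clvp_loss` penalises off-diagonal
+# mass symmetrically in both the text->speech and speech->text directions.
+#
+#     similarity = torch.tensor([[10.0, 0.0], [0.0, 10.0]])
+#     print(clvp_loss(similarity))  # close to 0
+#
+#     similarity = torch.zeros(2, 2)
+#     print(clvp_loss(similarity))  # ln(2) ~= 0.693, i.e. chance level
+
+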
+# Copied from transformers.models.llama.modeling_llama.rotate_half
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+def apply_rotary_pos_emb(q, k, v, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+        q (`torch.Tensor`): The query tensor.
+        k (`torch.Tensor`): The key tensor.
+        v (`torch.Tensor`): The value tensor.
+        cos (`torch.Tensor`): The cosine part of the rotary embedding.
+        sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+        `tuple(torch.Tensor)` comprising the query, key and value tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ v_embed = (v * cos) + (rotate_half(v) * sin)
+ return q_embed, k_embed, v_embed
+
+
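+# Illustrative sketch (not part of the original file): the shapes expected by
+# `apply_rotary_pos_emb` as it is called from `ClvpSelfAttention`, where q, k and v are
+# laid out as [batch_size, num_heads, seq_len, rotary_dim] and cos/sin come from
+# `ClvpRotaryPositionalEmbedding` with the batch dimension squeezed out. The numbers
+# below are made up for the example.
+#
+#     batch_size, num_heads, seq_len, rotary_dim = 2, 4, 8, 32
+#     q = k = v = torch.randn(batch_size, num_heads, seq_len, rotary_dim)
+#     freqs = torch.randn(seq_len, rotary_dim)
+#     cos, sin = freqs.cos(), freqs.sin()
+#     position_ids = torch.arange(seq_len).unsqueeze(0).repeat(batch_size, 1)
+#     q_rot, k_rot, v_rot = apply_rotary_pos_emb(q, k, v, cos, sin, position_ids)
+#     print(q_rot.shape)  # torch.Size([2, 4, 8, 32])
+
+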
+def _pad_extra_bos_eos_tokens(
+ input_ids,
+ attention_mask=None,
+ pad_token_id=0,
+ bos_token_id=255,
+ eos_token_id=0,
+ add_bos_token=True,
+ add_eos_token=True,
+):
+ """
+    This method adds extra bos and eos tokens to `input_ids` and modifies the `attention_mask` accordingly. It is used
+    in `ClvpConditioningEncoder` and in the generation loop of `ClvpModelForConditionalGeneration`.
+ """
+
+ # add the bos token at the beginning
+ if add_bos_token:
+ input_ids = torch.nn.functional.pad(input_ids, (1, 0), value=bos_token_id)
+ attention_mask = (
+ torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask
+ )
+
+ modified_input_ids = input_ids
+ if add_eos_token:
+ modified_input_ids = torch.zeros(
+ (input_ids.shape[0], input_ids.shape[1] + 1), dtype=input_ids.dtype, device=input_ids.device
+ )
+ for i, each_input_id in enumerate(input_ids):
+ # locate where the valid tokens end and then add the eos token
+ if torch.isin(each_input_id, pad_token_id).sum():
+ pos = torch.where(each_input_id == pad_token_id)[0].min()
+ modified_input_ids[i] = torch.concatenate(
+ [each_input_id[:pos], torch.tensor([eos_token_id], device=input_ids.device), each_input_id[pos:]]
+ )
+ else:
+ # if there are no pad tokens present, then add eos to the end
+ modified_input_ids[i] = torch.nn.functional.pad(each_input_id, (0, 1), value=eos_token_id)
+ attention_mask = (
+ torch.nn.functional.pad(attention_mask, (1, 0), value=1) if attention_mask is not None else attention_mask
+ )
+
+ return modified_input_ids, attention_mask
+
+
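+# Illustrative sketch (not part of the original file): with the default special tokens
+# (pad=0, bos=255, eos=0), the bos id is prepended to every row and the eos id is
+# spliced in just before the first pad token (or appended if the row has no padding).
+# Because eos and pad share id 0 by default, they look identical in the printout.
+#
+#     input_ids = torch.tensor([[12, 17, 0, 0], [5, 6, 7, 8]])
+#     attention_mask = torch.tensor([[1, 1, 0, 0], [1, 1, 1, 1]])
+#     ids, mask = _pad_extra_bos_eos_tokens(input_ids, attention_mask)
+#     print(ids)
+#     # tensor([[255,  12,  17,   0,   0,   0],
+#     #         [255,   5,   6,   7,   8,   0]])
+
+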
+@dataclass
+class ClvpEncoderOutput(ModelOutput):
+ """
+ Base class for CLVP encoder's outputs that contains a pooling of the last hidden states as well as a projection
+ output (a linear layer on top of the pooled output).
+
+ Args:
+ embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ The hidden state of the last layer of the model.
+ pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
+ Pooled output of the `last_hidden_state`.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of
+ the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in
+ the self-attention heads.
+ """
+
+ embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ pooler_output: Optional[torch.FloatTensor] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class ClvpOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for speech-text similarity.
+ speech_ids (`torch.LongTensor`, *optional*):
+ speech_ids (or speech candidates) generated by the `ClvpForCausalLM` model.
+ logits_per_speech (`torch.FloatTensor` of shape `(speech_batch_size, text_batch_size)`):
+ The scaled dot product scores between `speech_embeds` and `text_embeds`. This represents the speech-text
+ similarity scores.
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, speech_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `speech_embeds`. This represents the text-speech
+ similarity scores.
+        text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of the text encoder
+ model.
+        speech_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The speech embeddings obtained by applying the projection layer to the pooled output of the speech encoder
+ model.
+ text_model_output (`BaseModelOutputWithPooling`):
+ The pooled output of the `last_hidden_state` of the text encoder Model.
+ speech_model_output (`BaseModelOutputWithPooling`):
+ The pooled output of the `last_hidden_state` of the speech encoder Model.
+ decoder_hidden_states (`torch.FloatTensor`, *optional*):
+ The hidden states of the decoder model.
+ text_encoder_hidden_states (`torch.FloatTensor`, *optional*):
+ The hidden states of the text encoder model.
+ speech_encoder_hidden_states (`torch.FloatTensor`, *optional*):
+ The hidden states of the speech encoder model.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ speech_ids: Optional[torch.LongTensor] = None
+ logits_per_speech: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ speech_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPooling = None
+ speech_model_output: BaseModelOutputWithPooling = None
+ decoder_hidden_states: torch.FloatTensor = None
+ text_encoder_hidden_states: torch.FloatTensor = None
+ speech_encoder_hidden_states: torch.FloatTensor = None
+
+
+# Copied from transformers.models.llama.modeling_llama.LlamaRMSNorm with Llama->Clvp
+class ClvpRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ ClvpRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
+ input_dtype = hidden_states.dtype
+ hidden_states = hidden_states.to(torch.float32)
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+ return self.weight * hidden_states.to(input_dtype)
+
+
+class ClvpRotaryPositionalEmbedding(nn.Module):
+ """
+    Rotary Position Embedding class for CLVP. It was proposed in the paper 'ROFORMER: ENHANCED TRANSFORMER WITH ROTARY
+    POSITION EMBEDDING'. Please see https://arxiv.org/pdf/2104.09864v1.pdf.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ dim = max(config.projection_dim // (config.num_attention_heads * 2), 32)
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
+
+ self.register_buffer("inv_freq", inv_freq)
+ self.cached_sequence_length = None
+ self.cached_rotary_positional_embedding = None
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ sequence_length = hidden_states.shape[1]
+
+ if sequence_length == self.cached_sequence_length and self.cached_rotary_positional_embedding is not None:
+ return self.cached_rotary_positional_embedding
+
+ self.cached_sequence_length = sequence_length
+ time_stamps = torch.arange(sequence_length, device=hidden_states.device).type_as(self.inv_freq)
+ freqs = torch.einsum("i,j->ij", time_stamps, self.inv_freq)
+ embeddings = torch.cat((freqs, freqs), dim=-1)
+
+ self.cached_rotary_positional_embedding = embeddings.unsqueeze(0)
+ return self.cached_rotary_positional_embedding
+
+
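+# Illustrative sketch (not part of the original file): the module caches one embedding
+# tensor per sequence length, so repeated calls with equally long inputs reuse it. The
+# `projection_dim`/`num_attention_heads` values below are made up for the example.
+#
+#     class _Cfg:
+#         projection_dim, num_attention_heads = 768, 12
+#
+#     rope = ClvpRotaryPositionalEmbedding(_Cfg())
+#     hidden_states = torch.randn(2, 10, 768)
+#     emb = rope(hidden_states)
+#     print(emb.shape)                   # torch.Size([1, 10, 32])
+#     print(emb is rope(hidden_states))  # True -> served from the cache
+
+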
+class ClvpSelfAttention(nn.Module):
+ """
+ Multi-headed attention to combine Absolute and Rotary Positional Embeddings into a single Attention module.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ if hasattr(config, "max_position_embeddings"):
+ max_positions = config.max_position_embeddings
+ bias = torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool))
+ bias = bias.view(1, 1, max_positions, max_positions)
+ self.register_buffer("bias", bias, persistent=False)
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=config.use_attention_bias)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ # Copied from transformers.models.clip.modeling_clip.CLIPAttention._shape
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ rotary_pos_emb: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ use_cache: Optional[bool] = False,
+ head_mask: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[torch.FloatTensor], Optional[Tuple[torch.FloatTensor]]]:
+        # Raise an error when `position_ids` is None but `rotary_pos_emb` is provided, because `position_ids` is
+        # needed to apply `rotary_pos_emb` to the query, key and value states.
+ if rotary_pos_emb is not None and position_ids is None:
+ raise ValueError("`position_ids` must be provided when `rotary_pos_emb` is not None.")
+
+ bsz, _, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self._shape(self.q_proj(hidden_states), -1, bsz) * self.scale
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if past_key_value is not None:
+ past_key, past_value = past_key_value
+ key_states = torch.cat((past_key, key_states), dim=-2)
+ value_states = torch.cat((past_value, value_states), dim=-2)
+
+ if use_cache is True:
+ present = (key_states, value_states)
+ else:
+ present = None
+
+ if rotary_pos_emb is not None:
+ rotary_emb_dim = rotary_pos_emb.shape[-1]
+
+ # Partial rotary embedding
+ query_rot, query_pass = (
+ query_states[..., :rotary_emb_dim],
+ query_states[..., rotary_emb_dim:],
+ )
+ key_rot, key_pass = (
+ key_states[..., :rotary_emb_dim],
+ key_states[..., rotary_emb_dim:],
+ )
+ value_rot, value_pass = (
+ value_states[..., :rotary_emb_dim],
+ value_states[..., rotary_emb_dim:],
+ )
+
+ cos, sin = rotary_pos_emb.cos().squeeze(0), rotary_pos_emb.sin().squeeze(0)
+ query_rot, key_rot, value_rot = apply_rotary_pos_emb(query_rot, key_rot, value_rot, cos, sin, position_ids)
+
+ # [batch_size, num_heads, seq_length, head_dim]
+ query_states = torch.cat((query_rot, query_pass), dim=-1)
+ key_states = torch.cat((key_rot, key_pass), dim=-1)
+ value_states = torch.cat((value_rot, value_pass), dim=-1)
+
+ tgt_len = query_states.shape[2]
+ src_len = key_states.shape[2]
+ attn_weights = torch.matmul(query_states, key_states.transpose(2, 3))
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights + attention_mask
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ # Mask heads if we want to
+ if head_mask is not None:
+ attn_weights = attn_weights * head_mask
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+ attn_output = torch.matmul(attn_probs, value_states)
+
+ if attn_output.size() != (bsz, self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2).contiguous()
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ if not output_attentions:
+ attn_weights = None
+
+ return attn_output, present, attn_weights
+
+
+class ClvpGatedLinearUnit(nn.Module):
+ """
+    `ClvpGatedLinearUnit` uses the second half of the `hidden_states` to act as a gate for the first half of the
+    `hidden_states`, which controls the flow of data from the first half of the tensor.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.proj = nn.Linear(config.hidden_size, config.intermediate_size * 2)
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
+ return hidden_states * self.activation_fn(gate)
+
+
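+# Illustrative sketch (not part of the original file): the projection doubles the width
+# and the second half gates the first, so the output width equals
+# `config.intermediate_size`. The config values below are made up for the example.
+#
+#     class _Cfg:
+#         hidden_act, hidden_size, intermediate_size = "gelu", 16, 32
+#
+#     glu = ClvpGatedLinearUnit(_Cfg())
+#     out = glu(torch.randn(2, 5, 16))
+#     print(out.shape)  # torch.Size([2, 5, 32])
+
+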
+class ClvpEncoderMLP(nn.Module):
+ """
+ This MLP is used in CLVP speech or text encoder models.
+ """
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+
+ self.fc1 = ClvpGatedLinearUnit(config)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.dropout_layer = nn.Dropout(config.dropout)
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.dropout_layer(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+class ClvpEncoderLayer(nn.Module):
+ def __init__(self, config: ClvpConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.self_attn = ClvpSelfAttention(config)
+ self.mlp = ClvpEncoderMLP(config)
+
+ self.input_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.post_attention_rmsnorm = ClvpRMSNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.FloatTensor,
+ rotary_pos_emb: torch.FloatTensor,
+ attention_mask: torch.LongTensor,
+ position_ids: torch.LongTensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch, seq_len, embed_dim)`):
+ input to the layer.
+ rotary_pos_emb (`torch.FloatTensor`):
+ rotary position embeddings generated by `ClvpRotaryPositionalEmbedding` module.
+ attention_mask (`torch.FloatTensor` of shape `(batch, 1, tgt_len, src_len)`):
+ attention mask where padding elements are indicated by very large negative values.
+ position_ids (`torch.LongTensor`):
+ Denotes position ids of the input tokens.
+ output_attentions (`bool`, *optional*, defaults to `False`):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.input_rmsnorm(hidden_states)
+
+ attention_outputs = self.self_attn(
+ hidden_states=hidden_states,
+ rotary_pos_emb=rotary_pos_emb,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = attention_outputs[0]
+
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.post_attention_rmsnorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attention_outputs[-1],)
+
+ return outputs
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP with GPT2->ClvpDecoderMLP
+class ClvpDecoderMLP(nn.Module):
+ def __init__(self, intermediate_size, config):
+ super().__init__()
+ embed_dim = config.hidden_size
+ self.c_fc = Conv1D(intermediate_size, embed_dim)
+ self.c_proj = Conv1D(embed_dim, intermediate_size)
+ self.act = ACT2FN[config.activation_function]
+ self.dropout = nn.Dropout(config.resid_pdrop)
+
+ def forward(self, hidden_states: Optional[Tuple[torch.FloatTensor]]) -> torch.FloatTensor:
+ hidden_states = self.c_fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ return hidden_states
+
+
+class ClvpDecoderLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ hidden_size = config.hidden_size
+ inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size
+
+ self.input_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+ self.attn = ClvpSelfAttention(config)
+ self.post_attention_layernorm = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+
+ self.mlp = ClvpDecoderMLP(inner_dim, config)
+
+ def forward(
+ self,
+ hidden_states: Optional[Tuple[torch.FloatTensor]],
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = False,
+ output_attentions: Optional[bool] = False,
+ ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
+ residual = hidden_states
+ hidden_states = self.input_layernorm(hidden_states)
+ attn_outputs = self.attn(
+ hidden_states,
+ past_key_value=past_key_value,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+ attn_output = attn_outputs[0]
+ outputs = attn_outputs[1:]
+ # residual connection
+ hidden_states = attn_output + residual
+
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ feed_forward_hidden_states = self.mlp(hidden_states)
+ # residual connection
+ hidden_states = residual + feed_forward_hidden_states
+
+ if use_cache:
+ outputs = (hidden_states,) + outputs
+ else:
+ outputs = (hidden_states,) + outputs[1:]
+
+ return outputs
+
+
+class ClvpConditioningEncoder(nn.Module):
+ """
+    This class processes the log-mel spectrograms (extracted by the feature extractor) and text tokens (produced by
+    the tokenizer) as inputs for the decoder model.
+
+    First, each log-mel spectrogram is processed into a single vector that captures its salient characteristics; then
+    the text tokens are converted into token embeddings, and position embeddings are added afterwards. Both of these
+    representations are concatenated and passed to the decoder model.
+
+    The text tokens help to incorporate the "text information", while the log-mel spectrogram is used to impart the
+    "voice characteristics" to the generated mel tokens.
+ """
+
+ def __init__(self, config: ClvpConfig):
+ super().__init__()
+
+ self.text_config = config.text_config
+ self.decoder_config = config.decoder_config
+
+ self.text_token_embedding = nn.Embedding(self.text_config.vocab_size, self.decoder_config.hidden_size)
+ self.text_position_embedding = nn.Embedding(
+ self.decoder_config.max_text_tokens, self.decoder_config.hidden_size
+ )
+
+ self.mel_conv = nn.Conv1d(self.decoder_config.feature_size, self.decoder_config.hidden_size, kernel_size=1)
+
+ # define group norms to be used before each attention layer
+ num_groups = self.compute_groupnorm_groups(self.decoder_config.hidden_size)
+ self.group_norms = nn.ModuleList(
+ [
+ nn.GroupNorm(num_groups, self.decoder_config.hidden_size, eps=1e-5, affine=True)
+ for _ in range(self.decoder_config.num_mel_attn_blocks)
+ ]
+ )
+
+ # define the attention layers
+ self.mel_attn_blocks = nn.ModuleList(
+ [ClvpSelfAttention(self.decoder_config) for _ in range(self.decoder_config.num_mel_attn_blocks)]
+ )
+
+ self.gradient_checkpointing = False
+
+ def compute_groupnorm_groups(self, channels: int, groups: int = 32):
+ """
+        Calculates the value of `num_groups` for `nn.GroupNorm`. This logic is taken from the official tortoise
+        repository:
+        https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/models/arch_util.py#L26
+ """
+ if channels <= 16:
+ groups = 8
+ elif channels <= 64:
+ groups = 16
+ while channels % groups != 0:
+ groups = int(groups / 2)
+
+ if groups <= 2:
+ raise ValueError(
+                f"Number of groups for the GroupNorm must be greater than 2, but it is {groups}. "
+                f"Please consider using a different `hidden_size`."
+ )
+
+ return groups
+
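+    # Illustrative sketch (not part of the original file): a few worked examples of the
+    # group-count computation above (start from 32 groups, or fewer for small channel
+    # counts, and halve until the channel count is divisible):
+    #
+    #     channels = 1024 -> 32 groups
+    #     channels = 80   -> 16 groups (32 does not divide 80, 16 does)
+    #     channels = 100  -> 4 groups
+    #     channels = 30   -> raises ValueError (would need <= 2 groups)
+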
+ def forward(
+ self,
+ input_features: torch.FloatTensor,
+ input_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ):
+ # process text
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.size()
+ elif inputs_embeds is not None:
+ batch_size, seq_length = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ # construct attention mask if not given
+ if attention_mask is None:
+ attention_mask = torch.ones([batch_size, seq_length], dtype=torch.long, device=input_ids.device)
+
+ # We add bos and eos input_ids in the modeling file instead of the tokenizer file to keep the logic simple
+ # This logic is specific to ClvpConditioningEncoder and not used by other modules.
+ input_ids, attention_mask = _pad_extra_bos_eos_tokens(
+ input_ids,
+ attention_mask,
+ bos_token_id=self.text_config.bos_token_id,
+ eos_token_id=self.text_config.eos_token_id,
+ )
+
+ inputs_embeds = self.text_token_embedding(input_ids)
+ position_ids = attention_mask.cumsum(-1) - 1
+ position_embeds = self.text_position_embedding(position_ids)
+ text_embeds = inputs_embeds + position_embeds
+
+ if self.gradient_checkpointing and self.training:
+ # process each log-mel spectrogram into a single vector
+ mel_spec = torch.utils.checkpoint.checkpoint(self.mel_conv, input_features)
+
+ for i, mel_attn_block in enumerate(self.mel_attn_blocks):
+ residual_mel_spec = mel_spec.transpose(1, 2)
+
+ mel_spec = torch.utils.checkpoint.checkpoint(self.group_norms[i], mel_spec).transpose(1, 2)
+ mel_spec = torch.utils.checkpoint.checkpoint(mel_attn_block, mel_spec)[0] + residual_mel_spec
+ mel_spec = mel_spec.transpose(1, 2)
+
+ else:
+ # process each log-mel spectrogram into a single vector
+ mel_spec = self.mel_conv(input_features)
+
+ for i, mel_attn_block in enumerate(self.mel_attn_blocks):
+ residual_mel_spec = mel_spec.transpose(1, 2)
+
+ mel_spec = self.group_norms[i](mel_spec).transpose(1, 2)
+ mel_spec = mel_attn_block(mel_spec)[0] + residual_mel_spec
+ mel_spec = mel_spec.transpose(1, 2)
+
+ mel_spec = mel_spec[:, :, 0]
+ mel_spec = mel_spec.unsqueeze(1)
+
+ # repeat if there is either (1 text vs N audios) or (N texts vs 1 audio)
+ if text_embeds.shape[0] == 1 and mel_spec.shape[0] != 1:
+ text_embeds = text_embeds.repeat(mel_spec.shape[0], 1, 1)
+ elif text_embeds.shape[0] != 1 and mel_spec.shape[0] == 1:
+ mel_spec = mel_spec.repeat(text_embeds.shape[0], 1, 1)
+        # If there are N texts and M audios (N != M), raise an error since the numbers of texts and audios must match.
+ elif text_embeds.shape[0] != mel_spec.shape[0]:
+ raise ValueError(
+                f"The number of texts and the number of audios must be the same. "
+ f"Found {text_embeds.shape[0]} texts vs {mel_spec.shape[0]} audios"
+ )
+
+ return torch.concat([mel_spec, text_embeds], dim=1)
+
+
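+# Illustrative note (not part of the original file): for `input_ids` of shape
+# (batch_size, seq_len) and `input_features` of shape (batch_size, feature_size, time_dim),
+# the conditioning encoder returns a tensor of shape (batch_size, 1 + (seq_len + 2), hidden_size):
+# the first-frame vector of each processed spectrogram followed by the bos/eos-extended
+# text embeddings.
+
+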
+class ClvpPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = ClvpConfig
+ base_model_prefix = "clvp"
+ supports_gradient_checkpointing = True
+ _skip_keys_device_placement = "past_key_values"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ factor = self.config.initializer_factor
+ if isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ elif isinstance(module, (nn.Linear, Conv1D, nn.Conv1d)):
+ module.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, ClvpEncoderMLP):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
+ nn.init.normal_(module.fc1.proj.weight if getattr(module.fc1, "proj") else module.fc1.weight, std=fc_std)
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
+ elif isinstance(module, ClvpEncoder):
+ config = self.config.text_config if hasattr(self.config, "text_config") else self.config
+ factor = config.initializer_factor
+ module.projection.weight.data.normal_(mean=0.0, std=factor * (config.hidden_size**-0.5))
+ elif isinstance(module, ClvpConditioningEncoder):
+ module.mel_conv.weight.data.normal_(mean=0.0, std=factor)
+ module.mel_conv.bias.data.zero_()
+ elif isinstance(module, ClvpForCausalLM):
+ for name, p in module.named_parameters():
+ if name == "c_proj.weight":
+ p.data.normal_(
+ mean=0.0, std=(self.config.initializer_range / math.sqrt(2 * self.config.num_hidden_layers))
+ )
+ if isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+CLVP_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`ClvpConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+CLVP_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`):
+ Indicates log mel-spectrogram representations for audio returned by [`ClvpFeatureExtractor`].
+ conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
+ inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
+ text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
+ inputs_embeds for the text encoder model passed in place of `input_ids`.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+CLVP_DECODER_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for
+ `past_key_values`. In other words, the `attention_mask` always has to have the length:
+ `len(past_key_values) + len(input_ids)`
+
+ [What are attention masks?](../glossary#attention-mask)
+ token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
+ Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
+ 1]`:
+
+ - 0 corresponds to a *sentence A* token,
+ - 1 corresponds to a *sentence B* token.
+
+ [What are token type IDs?](../glossary#token-type-ids)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
+ `past_key_values`).
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class ClvpEncoder(ClvpPreTrainedModel):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`ClvpEncoderLayer`].
+
+ Args:
+ config: ClvpConfig
+ """
+
+ def __init__(self, config: ClvpConfig):
+ super().__init__(config)
+
+ self.config = config
+ self.token_embedding = nn.Embedding(config.vocab_size, config.hidden_size)
+ self.rotary_pos_emb = ClvpRotaryPositionalEmbedding(config) if config.use_rotary_embedding else None
+ self.layers = nn.ModuleList([ClvpEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+
+ self.sequence_summary = SequenceSummary(config)
+ self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
+
+ self.gradient_checkpointing = False
+
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.token_embedding
+
+ def set_input_embeddings(self, value):
+ self.token_embedding = value
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+        inputs_embeds: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ input embeddings for the model. This bypasses the model's internal embedding lookup matrix.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor`, *optional*):
+ Denotes the position ids of `input_ids`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ inputs_embeds = self.token_embedding(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ # expand attention_mask and create position_ids if needed
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, inputs_embeds.dtype)
+
+ if position_ids is None:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+ position_ids = torch.arange(input_shape[1], dtype=torch.long, device=device)
+ position_ids = position_ids.unsqueeze(0)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ rotary_pos_emb = self.rotary_pos_emb(inputs_embeds) if self.rotary_pos_emb is not None else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = torch.utils.checkpoint.checkpoint(
+ encoder_layer.__call__,
+ hidden_states,
+ rotary_pos_emb,
+ attention_mask,
+ position_ids,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ rotary_pos_emb,
+ attention_mask,
+ position_ids,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ last_hidden_state = hidden_states
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
+
+ # take the mean over axis 1 and get pooled output
+ pooled_output = self.sequence_summary(last_hidden_state)
+
+ # apply the projection layer
+ embeds = self.projection(pooled_output)
+
+ if not return_dict:
+ return tuple(
+ v for v in [embeds, last_hidden_state, pooled_output, encoder_states, all_attentions] if v is not None
+ )
+
+ return ClvpEncoderOutput(
+ embeds=embeds,
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_states,
+ attentions=all_attentions,
+ )
+
+
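+# Illustrative sketch (not part of the original file): a text encoder built from a
+# default `ClvpEncoderConfig` maps token ids to a projected utterance embedding. The
+# batch size and sequence length below are made up for the example.
+#
+#     config = ClvpEncoderConfig()
+#     encoder = ClvpEncoder(config)
+#     input_ids = torch.randint(0, config.vocab_size, (2, 12))
+#     out = encoder(input_ids=input_ids)
+#     print(out.embeds.shape)  # torch.Size([2, config.projection_dim])
+
+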
+class ClvpDecoder(ClvpPreTrainedModel):
+ """
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`ClvpDecoderLayer`]
+ """
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.config = config
+
+ self.input_embeds_layer = nn.Embedding(self.config.vocab_size, self.config.hidden_size)
+ self.position_embeds_layer = nn.Embedding(self.config.max_position_embeddings, self.config.hidden_size)
+
+ self.drop = nn.Dropout(self.config.embd_pdrop)
+ self.layers = nn.ModuleList([ClvpDecoderLayer(self.config) for _ in range(self.config.num_hidden_layers)])
+ self.layer_norm = nn.LayerNorm(self.config.hidden_size, eps=self.config.layer_norm_epsilon)
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.input_embeds_layer
+
+ def set_input_embeddings(self, new_embeddings):
+ self.input_embeds_layer = new_embeddings
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
+ """
+ for layer, heads in heads_to_prune.items():
+ self.layers[layer].attn.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+ input_ids.shape[0]
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ inputs_embeds.shape[0]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids.view(-1, input_shape[-1])
+
+ if past_key_values is None:
+ past_key_values_length = 0
+ past_key_values = tuple([None] * len(self.layers))
+ else:
+ past_key_values_length = past_key_values[0][0].size(-2)
+ if position_ids is None:
+ position_ids = torch.arange(
+ past_key_values_length, input_shape[-1] + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])
+
+ if inputs_embeds is None:
+ inputs_embeds = self.input_embeds_layer(input_ids)
+ position_embeds = self.position_embeds_layer(position_ids)
+ inputs_embeds = inputs_embeds + position_embeds
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicate we keep the head
+ # attention_probs has shape bsz x num_attention_heads x N x N
+ # head_mask has shape num_hidden_layers x batch x num_attention_heads x N x N
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+
+ hidden_states = inputs_embeds
+
+ if token_type_ids is not None:
+ token_type_embeds = self.input_embeds_layer(token_type_ids)
+ hidden_states = hidden_states + token_type_embeds
+
+ hidden_states = self.drop(hidden_states)
+
+ output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
+ all_hidden_states = () if output_hidden_states else None
+ for i, (block, past_key_value) in enumerate(zip(self.layers, past_key_values)):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = torch.utils.checkpoint.checkpoint(
+ block.__call__,
+ hidden_states,
+ None,
+ attention_mask,
+ position_ids,
+ head_mask[i],
+ )
+ else:
+ outputs = block(
+ hidden_states,
+ past_key_value=past_key_value,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask[i],
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+ if self.config.add_cross_attention:
+ all_cross_attentions = all_cross_attentions + (outputs[3 if use_cache else 2],)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ hidden_states = hidden_states.view(output_shape)
+
+ # Add last hidden state
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, presents, all_hidden_states, all_self_attentions, all_cross_attentions]
+ if v is not None
+ )
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The bare Clvp decoder model outputting raw hidden-states without any specific head on top.",
+ CLVP_START_DOCSTRING,
+)
+class ClvpModel(ClvpPreTrainedModel):
+ def __init__(self, config: ClvpDecoderConfig):
+ super().__init__(config)
+ self.config = config
+ self.decoder = ClvpDecoder(self.config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.decoder.input_embeds_layer
+
+ def set_input_embeddings(self, value):
+ self.decoder.input_embeds_layer = value
+
+ def get_decoder(self):
+ return self.decoder
+
+ @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consist of (dec_features, past_key_value, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ hidden_states=decoder_outputs.hidden_states,
+ attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ "The CLVP decoder model with a language modelling head on top.",
+ CLVP_START_DOCSTRING,
+)
+class ClvpForCausalLM(ClvpPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.config = config
+ self.model = ClvpModel(self.config)
+
+ self.final_norm = nn.LayerNorm(self.config.hidden_size)
+ self.lm_head = nn.Linear(self.config.hidden_size, self.config.vocab_size, bias=True)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.decoder.input_embeds_layer
+
+ def set_input_embeddings(self, new_embeddings):
+ self.model.decoder.input_embeds_layer = new_embeddings
+
+ def _prepare_model_inputs(
+ self,
+ inputs: Optional[torch.Tensor] = None,
+ bos_token_id: Optional[int] = None,
+ model_kwargs: Optional[Dict[str, torch.Tensor]] = None,
+ ) -> Tuple[torch.Tensor, Optional[str], Dict[str, torch.Tensor]]:
+ """
+ This function extracts the model-specific `inputs` for generation.
+ """
+ input_name = self.main_input_name
+
+ model_kwargs = {k: v for k, v in model_kwargs.items() if v is not None}
+
+ inputs_kwarg = model_kwargs.pop(input_name, None)
+ if inputs_kwarg is not None and inputs is not None:
+ raise ValueError(
+                f"`inputs`: {inputs} were passed alongside {input_name} which is not allowed. "
+                f"Make sure to either pass {inputs} or {input_name}=..."
+ )
+ elif inputs_kwarg is not None:
+ inputs = inputs_kwarg
+
+ if input_name == "input_ids" and "inputs_embeds" in model_kwargs:
+ model_kwargs["input_ids"] = self._maybe_initialize_input_ids_for_generation(
+ inputs, bos_token_id, model_kwargs=model_kwargs
+ )
+ inputs, input_name = model_kwargs["inputs_embeds"], "inputs_embeds"
+
+        # If conditioning_embeds are provided, concatenate the embedding of the bos_token_id to the end of the conditioning_embeds.
+        # Then subtract the position embeddings here, since they will be added again during the forward pass and would otherwise be counted twice.
+ conditioning_embeds = model_kwargs.get("conditioning_embeds", None)
+
+ if conditioning_embeds is not None:
+ mel_start_token_embedding = self.model.decoder.input_embeds_layer(
+ torch.full(
+ (conditioning_embeds.shape[0], 1),
+ fill_value=self.config.bos_token_id,
+ device=conditioning_embeds.device,
+ )
+ )
+ mel_start_token_embedding += self.model.decoder.position_embeds_layer(
+ torch.full((conditioning_embeds.shape[0], 1), fill_value=0, device=conditioning_embeds.device)
+ )
+ conditioning_embeds = torch.concat([conditioning_embeds, mel_start_token_embedding], dim=1)
+
+ # subtract the positional_ids here
+            if "attention_mask" in model_kwargs:
+ position_ids = model_kwargs["attention_mask"].long().cumsum(-1) - 1
+ else:
+                position_ids = torch.arange(
+                    0, conditioning_embeds.shape[1], dtype=torch.long, device=conditioning_embeds.device
+                )
+ position_ids = position_ids.unsqueeze(0).repeat(conditioning_embeds.shape[0], 1)
+
+ model_kwargs["inputs_embeds"] = conditioning_embeds - self.model.decoder.position_embeds_layer(
+ position_ids
+ )
+ model_kwargs["input_ids"] = (
+ torch.ones((model_kwargs["inputs_embeds"].shape[0], 1), dtype=torch.long, device=self.device)
+ * self.config.bos_token_id
+ )
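+            # `input_ids` is set to a single bos token per sequence here only so that the generation utilities
+            # have a token tensor to extend; the actual first-step content is carried by `inputs_embeds`.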
+
+ return model_kwargs["inputs_embeds"], "inputs_embeds", model_kwargs
+
+ inputs = self._maybe_initialize_input_ids_for_generation(inputs, bos_token_id, model_kwargs)
+ return inputs, input_name, model_kwargs
+
+ def prepare_inputs_for_generation(
+ self, input_ids, past_key_values=None, inputs_embeds=None, conditioning_embeds=None, **kwargs
+ ):
+ input_ids_length = input_ids.shape[-1]
+ token_type_ids = kwargs.get("token_type_ids", None)
+        # only keep the last token for input_ids if past_key_values is defined in kwargs
+ if past_key_values:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -input_ids.shape[1] :]
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ if past_key_values:
+ position_ids = position_ids[:, -1].unsqueeze(-1)
+ else:
+ position_ids = None
+
+ if conditioning_embeds is not None and past_key_values is not None:
+ position_ids = torch.tensor([input_ids_length], dtype=torch.long, device=input_ids.device)
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "position_ids": position_ids,
+ "token_type_ids": token_type_ids,
+ }
+ )
+ return model_inputs
+
+ @add_start_docstrings_to_model_forward(CLVP_DECODER_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.model(
+ input_ids=input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ token_type_ids=token_type_ids,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+
+ lm_logits = self.final_norm(hidden_states)
+ lm_logits = self.lm_head(lm_logits)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(lm_logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ @staticmethod
+ # Copied from transformers.models.gpt2.modeling_gpt2.GPT2LMHeadModel._reorder_cache
+ def _reorder_cache(
+ past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor
+ ) -> Tuple[Tuple[torch.Tensor]]:
+ """
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+ beam_idx at every generation step.
+ """
+ return tuple(
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past)
+ for layer_past in past_key_values
+ )
+
+
+@add_start_docstrings(
+    "The composite CLVP model with a text encoder, speech encoder and speech decoder model. "
+    "The speech decoder model generates the speech_ids from the text, and the text encoder and speech encoder work "
+    "together to filter out the best speech_ids.",
+ CLVP_START_DOCSTRING,
+)
+class ClvpModelForConditionalGeneration(ClvpPreTrainedModel):
+ config_class = ClvpConfig
+
+ def __init__(self, config: ClvpConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, ClvpEncoderConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type `ClvpEncoderConfig` but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.speech_config, ClvpEncoderConfig):
+ raise ValueError(
+ "config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type"
+ f" {type(config.speech_config)}."
+ )
+
+ if not isinstance(config.decoder_config, ClvpDecoderConfig):
+ raise ValueError(
+ "config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type"
+ f" {type(config.decoder_config)}."
+ )
+
+ self.conditioning_encoder = ClvpConditioningEncoder(config)
+
+ self.speech_decoder_model = ClvpForCausalLM(config.decoder_config)
+
+ self.text_encoder_model = ClvpEncoder(config.text_config)
+ self.speech_encoder_model = ClvpEncoder(config.speech_config)
+
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ # taken from the original repo,
+ # link : https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/api.py#L117
+ def fix_speech_decoder_output(self, speech_ids: torch.LongTensor) -> torch.LongTensor:
+ """
+ This method modifies the output of the decoder model, such as replacing the `eos_token_id` and changing the
+ last few tokens of each sequence.
+
+ Args:
+ speech_ids (`torch.LongTensor`):
+ This refers to the output of the decoder model.
+ """
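+        # Illustrative sketch (symbols are placeholders, not real config values): with decoder_fixing_codes
+        # [c0, c1, c2, c3] and eos token e, a generated row [bos, t1, t2, t3, e, e] first drops the bos, every
+        # e becomes c0, everything from the first stop token onwards is overwritten with c0, and the last three
+        # positions are set to [c1, c2, c3], giving [t1, t2, c1, c2, c3].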
+ decoder_fixing_codes = self.config.decoder_config.decoder_fixing_codes
+ speech_ids = speech_ids[:, 1:]
+
+ stop_token_indices = torch.where(speech_ids == self.speech_decoder_model.config.eos_token_id, 1, 0)
+ speech_ids = torch.masked_fill(speech_ids, mask=stop_token_indices.bool(), value=decoder_fixing_codes[0])
+
+ for i, each_seq_stop_token_index in enumerate(stop_token_indices):
+ # This means that no stop tokens were found so the sentence was still being generated, in that case we don't need
+ # to apply any padding so just skip to the next sequence of tokens.
+ if each_seq_stop_token_index.sum() == 0:
+ continue
+
+ stm = each_seq_stop_token_index.argmax()
+ speech_ids[i, stm:] = decoder_fixing_codes[0]
+ if stm - 3 < speech_ids.shape[1]:
+ speech_ids[i, -3:] = torch.tensor(
+ [decoder_fixing_codes[1:]], device=speech_ids.device, dtype=torch.long
+ )
+
+ return speech_ids
+
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ ) -> torch.FloatTensor:
+ r"""
+        This method can be used to extract text_embeds from a text. The text embeddings are obtained by applying the
+ projection layer to the pooled output of the CLVP text encoder model.
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
+ provide it.
+
+ [What are input IDs?](../glossary#input-ids)
+ text_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
+ inputs_embeds for the text encoder model passed in place of `input_ids`.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Returns:
+ `torch.FloatTensor` of shape `(batch_size, output_dim)`:
+ The text embeddings obtained by applying the projection layer to the pooled output of the CLVP Text
+ Model.
+
+ Examples:
+
+ ```python
+ >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
+
+ >>> # Define the Text
+ >>> text = "This is an example text."
+
+ >>> # Define processor and model
+ >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
+ >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
+
+ >>> # Generate processor output and text embeds
+ >>> processor_output = processor(text=text, return_tensors="pt")
+ >>> text_embeds = model.get_text_features(input_ids=processor_output["input_ids"])
+ ```
+ """
+
+ outputs = self.text_encoder_model(
+ input_ids=input_ids,
+ inputs_embeds=text_encoder_inputs_embeds,
+ attention_mask=attention_mask,
+ )
+
+ return outputs[0]
+
+ def get_speech_features(
+ self,
+ speech_ids: Optional[torch.LongTensor] = None,
+ input_ids: Optional[torch.LongTensor] = None,
+ input_features: Optional[torch.FloatTensor] = None,
+ conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ **kwargs,
+ ) -> torch.FloatTensor:
+ r"""
+ This method can be used to extract speech_embeds. The speech embeddings are obtained by applying the speech
+        model on speech_ids. If speech_ids is not present but both input_ids and input_features are given, then the
+        decoder model is first used to generate the speech_ids, and the speech model is then applied to them.
+
+ Args:
+ speech_ids (`torch.LongTensor` of shape `(batch_size, num_speech_ids)`, *optional*):
+ Speech Tokens. Padding will be ignored by default should you provide it. If speech_ids are provided
+ then input_ids and input_features will be automatically ignored.
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Input text Tokens. Processed from the [`ClvpTokenizer`]. If speech_ids is not provided, then input_ids
+ and input_features will be used.
+ input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
+ Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`]. If
+ speech_ids is not provided, then input_ids and input_features will be used.
+ conditioning_encoder_inputs_embeds (`torch.FloatTensor`, *optional*):
+ inputs_embeds for `ClvpConditioningEncoder`. Can be used in place of `input_ids`.
+ attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding speech token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ generation_config (`GenerationConfig`, *optional*):
+ generation config to control the generation of speech_ids if they are not provided.
+
+ Returns:
+ `torch.FloatTensor` of shape `(batch_size, output_dim)`:
+ The speech embeddings obtained by applying the projection layer to the pooled output of the CLVP Speech
+ Model.
+
+ Examples:
+
+ ```python
+ >>> import datasets
+ >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
+
+ >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
+ >>> text = "This is an example text."
+ >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
+ >>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
+
+ >>> # Define processor and model
+ >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
+ >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
+
+ >>> # Generate processor output and model output
+ >>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
+ >>> speech_embeds = model.get_speech_features(
+ ... input_ids=processor_output["input_ids"], input_features=processor_output["input_features"]
+ ... )
+ ```
+ """
+
+ if speech_ids is None:
+ if (input_ids is None and conditioning_encoder_inputs_embeds is None) or input_features is None:
+ raise ValueError(
+ "Either speech_ids or input_ids/conditioning_encoder_inputs_embeds and input_features must be provided."
+ )
+
+ if generation_config is None:
+ generation_config = self.generation_config
+ generation_config.update(**kwargs)
+
+ conditioning_embeds = self.conditioning_encoder(
+ input_features=input_features,
+ input_ids=input_ids,
+ inputs_embeds=conditioning_encoder_inputs_embeds,
+ attention_mask=attention_mask,
+ )
+
+ speech_ids = self.speech_decoder_model.generate(
+ conditioning_embeds=conditioning_embeds,
+ generation_config=generation_config,
+ )
+
+ speech_ids = self.fix_speech_decoder_output(speech_ids[0])
+
+ outputs = self.speech_encoder_model(
+ input_ids=speech_ids,
+ attention_mask=attention_mask,
+ )
+
+ return outputs[0]
+
+ @add_start_docstrings_to_model_forward(CLVP_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=ClvpOutput, config_class=ClvpConfig)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ input_features: torch.FloatTensor = None,
+ conditioning_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ text_encoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, ClvpOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> import datasets
+ >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration
+
+ >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library)
+ >>> text = "This is an example text."
+
+ >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050))
+ >>> _, audio, sr = ds.sort("id").select(range(1))[:1]["audio"][0].values()
+
+ >>> # Define processor and model
+ >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev")
+ >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev")
+
+ >>> # processor outputs and model outputs
+ >>> processor_output = processor(raw_speech=audio, sampling_rate=sr, text=text, return_tensors="pt")
+ >>> outputs = model(
+ ... input_ids=processor_output["input_ids"],
+ ... input_features=processor_output["input_features"],
+ ... return_dict=True,
+ ... )
+ ```
+ """
+
+ # Use CLVP model's config for some fields (if specified) instead of those of speech & text components.
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ conditioning_embeds = self.conditioning_encoder(
+ input_features=input_features,
+ input_ids=input_ids,
+ inputs_embeds=conditioning_encoder_inputs_embeds,
+ attention_mask=attention_mask,
+ )
+
+ decoder_outputs = self.speech_decoder_model(
+ inputs_embeds=conditioning_embeds,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ speech_ids = decoder_outputs[0]
+
+ # since we will get the embeds of shape `(batch_size, seq_len, embedding_dim)` during the forward pass
+        # we must convert it to tokens, to make it compatible with the speech_transformer
+ if speech_ids.ndim == 3:
+ speech_ids = speech_ids.argmax(2)
+ speech_ids = self.fix_speech_decoder_output(speech_ids)
+
+ speech_outputs = self.speech_encoder_model(
+ input_ids=speech_ids,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ text_outputs = self.text_encoder_model(
+ input_ids=input_ids,
+ inputs_embeds=text_encoder_inputs_embeds,
+ attention_mask=attention_mask,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ speech_embeds = speech_outputs[0]
+ text_embeds = text_outputs[0]
+
+ # normalized features
+ speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
+ logits_per_speech = logits_per_text.t()
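+        # logits_per_text[i, j] is the scaled cosine similarity between text i and speech candidate j, so each
+        # row (after a softmax) can be read as a distribution over which speech sequence best matches that text.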
+
+ loss = None
+ if return_loss:
+ loss = clvp_loss(logits_per_text)
+
+ if not return_dict:
+ output = (
+ logits_per_speech,
+ logits_per_text,
+ text_embeds,
+ speech_embeds,
+ text_outputs[2],
+ speech_outputs[2],
+ )
+ if output_hidden_states:
+ output += (
+ decoder_outputs[-1],
+ text_outputs[-1],
+ speech_outputs[-1],
+ )
+
+ return ((loss,) + output) if loss is not None else output
+
+ return ClvpOutput(
+ loss=loss,
+ logits_per_speech=logits_per_speech,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ speech_embeds=speech_embeds,
+ text_model_output=text_outputs[2],
+ speech_model_output=speech_outputs[2],
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ text_encoder_hidden_states=text_outputs.hidden_states,
+ speech_encoder_hidden_states=speech_outputs.hidden_states,
+ )
+
+ @torch.no_grad()
+ def generate(
+ self,
+ input_ids: torch.LongTensor = None,
+ input_features: torch.FloatTensor = None,
+ attention_mask: Optional[torch.LongTensor] = None,
+ generation_config: Optional[GenerationConfig] = None,
+ pad_to_max_mel_tokens: Optional[int] = None,
+ output_hidden_states: Optional[bool] = None,
+ **kwargs,
+ ):
+ """
+        Generate method for `ClvpModelForConditionalGeneration`. This method calls the `generate` method of
+        `ClvpForCausalLM` and then uses the generated `speech_ids` to compute `text_embeds` and `speech_embeds` using
+ `ClvpEncoder`.
+
+ Args:
+            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Input text Tokens. Processed from the [`ClvpTokenizer`].
+ input_features (`torch.FloatTensor` of shape `(batch_size, feature_size, time_dim)`, *optional*):
+ Indicates log-melspectrogram representations for audio returned by [`ClvpFeatureExtractor`].
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding text token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ generation_config (`~generation.GenerationConfig`, *optional*):
+ The generation configuration to be used as base parametrization for the generation call. `**kwargs`
+ passed to generate matching the attributes of `generation_config` will override them. If
+                `generation_config` is not provided, the default will be used, which has the following loading
+ priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
+ configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
+ default values, whose documentation should be checked to parameterize generation.
+ pad_to_max_mel_tokens (`int`, *optional*):
+                Pads the generated speech_ids to the specified value. This is to implement the same logic from the
+                official repo, link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
+                and to make sure the logits are the same.
+                This does not affect generation quality, so please avoid using it since it is less efficient.
+ output_hidden_states (`bool`, *optional*):
+                Whether or not to return the hidden states of the decoder model, the text encoder and the speech encoder.
+
+ Returns:
+ `ClvpOutput` or tuple: A `ClvpOutput` (if `return_dict_in_generate=True` or when
+ `config.return_dict_in_generate=True`) or a tuple.
+ """
+
+        # If the input sequences are longer than (self.config.decoder_config.max_text_tokens - 3), raise an error,
+        # because we need to add 3 tokens (1 bos token and 2 eos tokens) to the input_ids in ClvpConditioningEncoder to
+        # properly sample.
+ sequence_length = input_ids.shape[-1]
+ if sequence_length > (self.config.decoder_config.max_text_tokens - 3):
+ raise ValueError(
+                f"Maximum sequence length reached! Found input_ids of length {sequence_length}. "
+                f"Please make sure that the maximum length of input_ids is {self.config.decoder_config.max_text_tokens - 3}."
+ )
+
+ if generation_config is None:
+ generation_config = self.generation_config
+
+ generation_config = copy.deepcopy(generation_config)
+ model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs
+ generation_config.validate()
+ self._validate_model_kwargs(model_kwargs.copy())
+
+ # pad input_ids as specified in the original repo
+ # link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L380
+ input_ids, attention_mask = _pad_extra_bos_eos_tokens(
+ input_ids,
+ attention_mask,
+ add_bos_token=False,
+ bos_token_id=self.config.text_config.bos_token_id,
+ eos_token_id=self.config.text_config.eos_token_id,
+ )
+
+ conditioning_embeds = self.conditioning_encoder(
+ input_features=input_features,
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ )
+
+ decoder_outputs = self.speech_decoder_model.generate(
+ conditioning_embeds=conditioning_embeds,
+ generation_config=generation_config,
+ output_hidden_states=output_hidden_states,
+ return_dict=generation_config.return_dict_in_generate,
+ )
+        if isinstance(decoder_outputs, ModelOutput):
+            speech_ids = decoder_outputs.sequences
+        else:
+            speech_ids = decoder_outputs
+
+ # pad to pad_to_max_mel_tokens if given, to replicate the original repo logic
+ # link: https://github.com/neonbjb/tortoise-tts/blob/80f89987a5abda5e2b082618cd74f9c7411141dc/tortoise/api.py#L430
+ if pad_to_max_mel_tokens is not None:
+ padding_needed = pad_to_max_mel_tokens - speech_ids.shape[-1]
+ speech_ids = torch.nn.functional.pad(
+ speech_ids, (0, padding_needed), value=self.generation_config.eos_token_id
+ )
+
+ speech_ids = self.fix_speech_decoder_output(speech_ids)
+
+ speech_outputs = self.speech_encoder_model(
+ input_ids=speech_ids,
+ output_hidden_states=output_hidden_states,
+ return_dict=generation_config.return_dict_in_generate,
+ )
+ text_outputs = self.text_encoder_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ output_hidden_states=output_hidden_states,
+ return_dict=generation_config.return_dict_in_generate,
+ )
+
+ speech_embeds = speech_outputs[0]
+ text_embeds = text_outputs[0]
+
+ # normalized features
+ speech_embeds = speech_embeds / speech_embeds.norm(p=2, dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_text = torch.matmul(text_embeds, speech_embeds.t()) * logit_scale
+ logits_per_speech = logits_per_text.t()
+
+ if not generation_config.return_dict_in_generate:
+ output = (
+ speech_ids,
+ logits_per_speech,
+ logits_per_text,
+ text_embeds,
+ speech_embeds,
+ text_outputs[2],
+ speech_outputs[2],
+ )
+ if output_hidden_states:
+ output += (
+ decoder_outputs[-1],
+ text_outputs[-1],
+ speech_outputs[-1],
+ )
+
+ return output
+
+ return ClvpOutput(
+ speech_ids=speech_ids,
+ logits_per_speech=logits_per_speech,
+ logits_per_text=logits_per_text,
+ text_embeds=text_embeds,
+ speech_embeds=speech_embeds,
+ text_model_output=text_outputs[2],
+ speech_model_output=speech_outputs[2],
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ text_encoder_hidden_states=text_outputs.hidden_states,
+ speech_encoder_hidden_states=speech_outputs.hidden_states,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py b/venv/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..86aa087e8139b0b2fe2e598c2d9ee55a0ddf0389
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clvp/number_normalizer.py
@@ -0,0 +1,238 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""English Normalizer class for CLVP."""
+
+
+import re
+
+
+class EnglishNormalizer:
+ def __init__(self):
+ # List of (regular expression, replacement) pairs for abbreviations:
+ self._abbreviations = [
+ (re.compile("\\b%s\\." % x[0], re.IGNORECASE), x[1])
+ for x in [
+ ("mrs", "misess"),
+ ("mr", "mister"),
+ ("dr", "doctor"),
+ ("st", "saint"),
+ ("co", "company"),
+ ("jr", "junior"),
+ ("maj", "major"),
+ ("gen", "general"),
+ ("drs", "doctors"),
+ ("rev", "reverend"),
+ ("lt", "lieutenant"),
+ ("hon", "honorable"),
+ ("sgt", "sergeant"),
+ ("capt", "captain"),
+ ("esq", "esquire"),
+ ("ltd", "limited"),
+ ("col", "colonel"),
+ ("ft", "fort"),
+ ]
+ ]
+
+ self.ones = ["", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"]
+ self.teens = [
+ "ten",
+ "eleven",
+ "twelve",
+ "thirteen",
+ "fourteen",
+ "fifteen",
+ "sixteen",
+ "seventeen",
+ "eighteen",
+ "nineteen",
+ ]
+ self.tens = ["", "", "twenty", "thirty", "forty", "fifty", "sixty", "seventy", "eighty", "ninety"]
+
+ def number_to_words(self, num: int) -> str:
+ """
+        Converts numbers (`int`) to words (`str`).
+
+        Please note that it only supports values up to "nine hundred ninety-nine quadrillion, nine hundred ninety-nine
+        trillion, nine hundred ninety-nine billion, nine hundred ninety-nine million, nine hundred ninety-nine
+        thousand, nine hundred ninety-nine", i.e. `number_to_words(999_999_999_999_999_999)`.
+ """
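+        # For example, number_to_words(1234) returns "one thousand, two hundred thirty-four" and
+        # number_to_words(-7) returns "minus seven".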
+ if num == 0:
+ return "zero"
+ elif num < 0:
+ return "minus " + self.number_to_words(abs(num))
+ elif num < 10:
+ return self.ones[num]
+ elif num < 20:
+ return self.teens[num - 10]
+ elif num < 100:
+ return self.tens[num // 10] + ("-" + self.number_to_words(num % 10) if num % 10 != 0 else "")
+ elif num < 1000:
+ return (
+ self.ones[num // 100] + " hundred" + (" " + self.number_to_words(num % 100) if num % 100 != 0 else "")
+ )
+ elif num < 1_000_000:
+ return (
+ self.number_to_words(num // 1000)
+ + " thousand"
+ + (", " + self.number_to_words(num % 1000) if num % 1000 != 0 else "")
+ )
+ elif num < 1_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000)
+ + " million"
+ + (", " + self.number_to_words(num % 1_000_000) if num % 1_000_000 != 0 else "")
+ )
+ elif num < 1_000_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000_000)
+ + " billion"
+ + (", " + self.number_to_words(num % 1_000_000_000) if num % 1_000_000_000 != 0 else "")
+ )
+ elif num < 1_000_000_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000_000_000)
+ + " trillion"
+ + (", " + self.number_to_words(num % 1_000_000_000_000) if num % 1_000_000_000_000 != 0 else "")
+ )
+ elif num < 1_000_000_000_000_000_000:
+ return (
+ self.number_to_words(num // 1_000_000_000_000_000)
+ + " quadrillion"
+ + (
+ ", " + self.number_to_words(num % 1_000_000_000_000_000)
+ if num % 1_000_000_000_000_000 != 0
+ else ""
+ )
+ )
+ else:
+ return "number out of range"
+
+ def convert_to_ascii(self, text: str) -> str:
+ """
+ Converts unicode to ascii
+ """
+ return text.encode("ascii", "ignore").decode("utf-8")
+
+    def _expand_dollars(self, m: re.Match) -> str:
+ """
+ This method is used to expand numerical dollar values into spoken words.
+ """
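+        # For example, "$2.50" is rewritten as "2 dollars, 50 cents"; the remaining digits are spelled out
+        # later by `normalize_numbers` via `_expand_number`.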
+ match = m.group(1)
+ parts = match.split(".")
+ if len(parts) > 2:
+ return match + " dollars" # Unexpected format
+
+ dollars = int(parts[0]) if parts[0] else 0
+ cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
+ if dollars and cents:
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
+ cent_unit = "cent" if cents == 1 else "cents"
+ return "%s %s, %s %s" % (dollars, dollar_unit, cents, cent_unit)
+ elif dollars:
+ dollar_unit = "dollar" if dollars == 1 else "dollars"
+ return "%s %s" % (dollars, dollar_unit)
+ elif cents:
+ cent_unit = "cent" if cents == 1 else "cents"
+ return "%s %s" % (cents, cent_unit)
+ else:
+ return "zero dollars"
+
+    def _remove_commas(self, m: re.Match) -> str:
+ """
+ This method is used to remove commas from sentences.
+ """
+ return m.group(1).replace(",", "")
+
+    def _expand_decimal_point(self, m: re.Match) -> str:
+ """
+ This method is used to expand '.' into spoken word ' point '.
+ """
+ return m.group(1).replace(".", " point ")
+
+    def _expand_ordinal(self, num: re.Match) -> str:
+ """
+ This method is used to expand ordinals such as '1st', '2nd' into spoken words.
+ """
+ ordinal_suffixes = {1: "st", 2: "nd", 3: "rd"}
+
+ num = int(num.group(0)[:-2])
+        if 10 <= num % 100 <= 20:
+ suffix = "th"
+ else:
+ suffix = ordinal_suffixes.get(num % 10, "th")
+ return self.number_to_words(num) + suffix
+
+    def _expand_number(self, m: re.Match) -> str:
+ """
+ This method acts as a preprocessing step for numbers between 1000 and 3000 (same as the original repository,
+ link :
+ https://github.com/neonbjb/tortoise-tts/blob/4003544b6ff4b68c09856e04d3eff9da26d023c2/tortoise/utils/tokenizer.py#L86)
+ """
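+        # For example, "1900" expands to "nineteen hundred" and "2007" to "two thousand seven"; values outside
+        # the (1000, 3000) range simply fall back to `number_to_words`.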
+ num = int(m.group(0))
+
+ if num > 1000 and num < 3000:
+ if num == 2000:
+ return "two thousand"
+ elif num > 2000 and num < 2010:
+ return "two thousand " + self.number_to_words(num % 100)
+ elif num % 100 == 0:
+ return self.number_to_words(num // 100) + " hundred"
+ else:
+ return self.number_to_words(num)
+ else:
+ return self.number_to_words(num)
+
+ def normalize_numbers(self, text: str) -> str:
+ """
+ This method is used to normalize numbers within a text such as converting the numbers to words, removing
+ commas, etc.
+ """
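+        # Illustrative example: "I have $13.25 and 1,000 marbles" becomes
+        # "I have thirteen dollars, twenty-five cents and one thousand marbles".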
+ text = re.sub(re.compile(r"([0-9][0-9\,]+[0-9])"), self._remove_commas, text)
+ text = re.sub(re.compile(r"£([0-9\,]*[0-9]+)"), r"\1 pounds", text)
+ text = re.sub(re.compile(r"\$([0-9\.\,]*[0-9]+)"), self._expand_dollars, text)
+ text = re.sub(re.compile(r"([0-9]+\.[0-9]+)"), self._expand_decimal_point, text)
+ text = re.sub(re.compile(r"[0-9]+(st|nd|rd|th)"), self._expand_ordinal, text)
+ text = re.sub(re.compile(r"[0-9]+"), self._expand_number, text)
+ return text
+
+ def expand_abbreviations(self, text: str) -> str:
+ """
+ Expands the abbreviate words.
+ """
+ for regex, replacement in self._abbreviations:
+ text = re.sub(regex, replacement, text)
+ return text
+
+ def collapse_whitespace(self, text: str) -> str:
+ """
+        Collapses runs of whitespace into a single space.
+ """
+ return re.sub(re.compile(r"\s+"), " ", text)
+
+ def __call__(self, text):
+ """
+        Converts text to ASCII, converts numbers and number-like quantities to their spelt-out counterparts, and
+        expands abbreviations.
+ """
+
+ text = self.convert_to_ascii(text)
+ text = text.lower()
+ text = self.normalize_numbers(text)
+ text = self.expand_abbreviations(text)
+ text = self.collapse_whitespace(text)
+ text = text.replace('"', "")
+
+ return text
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py b/venv/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..0723986db9757d9ade5719333ad862b09e33685e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clvp/processing_clvp.py
@@ -0,0 +1,91 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Processor class for CLVP
+"""
+
+
+from ...processing_utils import ProcessorMixin
+
+
+class ClvpProcessor(ProcessorMixin):
+ r"""
+ Constructs a CLVP processor which wraps a CLVP Feature Extractor and a CLVP Tokenizer into a single processor.
+
+ [`ClvpProcessor`] offers all the functionalities of [`ClvpFeatureExtractor`] and [`ClvpTokenizer`]. See the
+ [`~ClvpProcessor.__call__`], [`~ClvpProcessor.decode`] and [`~ClvpProcessor.batch_decode`] for more information.
+
+ Args:
+ feature_extractor (`ClvpFeatureExtractor`):
+ An instance of [`ClvpFeatureExtractor`]. The feature extractor is a required input.
+ tokenizer (`ClvpTokenizer`):
+ An instance of [`ClvpTokenizer`]. The tokenizer is a required input.
+ """
+
+ feature_extractor_class = "ClvpFeatureExtractor"
+ tokenizer_class = "ClvpTokenizer"
+ model_input_names = [
+ "input_ids",
+ "input_features",
+ "attention_mask",
+ ]
+
+ def __init__(self, feature_extractor, tokenizer):
+ super().__init__(feature_extractor, tokenizer)
+
+ def __call__(self, *args, **kwargs):
+ """
+ Forwards the `audio` and `sampling_rate` arguments to [`~ClvpFeatureExtractor.__call__`] and the `text`
+        argument to [`~ClvpTokenizer.__call__`]. Please refer to the docstring of the above two methods for more
+ information.
+ """
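+        # Depending on which of `raw_speech` / `text` is provided, this returns the feature extractor outputs,
+        # the tokenizer outputs, or a merged dict with `input_features`, `input_ids` and `attention_mask`.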
+
+ raw_speech = kwargs.pop("raw_speech", None)
+ sampling_rate = kwargs.pop("sampling_rate", None)
+ text = kwargs.pop("text", None)
+
+ if raw_speech is None and text is None:
+            raise ValueError("You need to specify either a `raw_speech` or a `text` input to process.")
+
+ if raw_speech is not None:
+ inputs = self.feature_extractor(raw_speech, sampling_rate=sampling_rate, **kwargs)
+ if text is not None:
+ encodings = self.tokenizer(text, **kwargs)
+
+ if text is None:
+ return inputs
+ elif raw_speech is None:
+ return encodings
+ else:
+ inputs["input_ids"] = encodings["input_ids"]
+ inputs["attention_mask"] = encodings["attention_mask"]
+ return inputs
+
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.batch_decode with Whisper->Clvp
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ # Copied from transformers.models.whisper.processing_whisper.WhisperProcessor.decode with Whisper->Clvp
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to ClvpTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py b/venv/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py
new file mode 100644
index 0000000000000000000000000000000000000000..d77564f718a53bc6a3149945fafb56bbaddcb529
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/clvp/tokenization_clvp.py
@@ -0,0 +1,364 @@
+# coding=utf-8
+# Copyright 2023 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization class for CLVP."""
+
+import json
+import os
+from functools import lru_cache
+from typing import List, Optional, Tuple
+
+import regex as re
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer
+from ...utils import logging
+from .number_normalizer import EnglishNormalizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+}
+
+
+@lru_cache()
+# Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
+def bytes_to_unicode():
+ """
+    Returns a list of utf-8 bytes and a mapping to unicode strings. We specifically avoid mapping to whitespace/control
+    characters that the bpe code barfs on.
+
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
+ tables between utf-8 bytes and unicode strings.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
+
+
+# Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word.
+
+ Word is represented as tuple of symbols (symbols being variable-length strings).
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+class ClvpTokenizer(PreTrainedTokenizer):
+ """
+ Construct a CLVP tokenizer. Based on byte-level Byte-Pair-Encoding.
+
+ This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
+ be encoded differently whether it is at the beginning of the sentence (without space) or not:
+
+ ```python
+ >>> from transformers import ClvpTokenizer
+
+ >>> tokenizer = ClvpTokenizer.from_pretrained("susnato/clvp_dev")
+ >>> tokenizer("Hello world")["input_ids"]
+ [62, 84, 28, 2, 179, 79]
+
+ >>> tokenizer(" Hello world")["input_ids"]
+ [2, 62, 84, 28, 2, 179, 79]
+ ```
+
+ You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you
+ call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance.
+
+
+    <Tip>
+
+ When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one).
+
+    </Tip>
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Path to the merges file.
+ errors (`str`, *optional*, defaults to `"replace"`):
+ Paradigm to follow when decoding bytes to UTF-8. See
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
+ The beginning of sequence token.
+ eos_token (`str`, *optional*, defaults to `"[STOP]"`):
+ The end of sequence token.
+ pad_token (`str`, *optional*, defaults to `"[STOP]"`):
+ The pad token of the sequence.
+ add_prefix_space (`bool`, *optional*, defaults to `False`):
+            Whether or not to add an initial space to the input. This allows treating the leading word just as any
+            other word. (The CLVP tokenizer detects the beginning of words by the preceding space.)
+ add_bos_token (`bool`, *optional*, defaults to `False`):
+            Whether to add `bos_token` in front of the sequence when `add_special_tokens=True`.
+        add_eos_token (`bool`, *optional*, defaults to `False`):
+            Whether to add `eos_token` at the end of the sequence when `add_special_tokens=True`.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = [
+ "input_ids",
+ "attention_mask",
+ ]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ errors="replace",
+ unk_token="[UNK]",
+ bos_token="<|endoftext|>",
+ eos_token="[STOP]",
+ pad_token="[STOP]",
+ add_prefix_space=False,
+ add_bos_token=False,
+ add_eos_token=False,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
+
+ self.add_bos_token = add_bos_token
+ self.add_eos_token = add_eos_token
+ self._normalizer = None
+
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ self.errors = errors # how to handle errors in decoding
+ self.byte_encoder = bytes_to_unicode()
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ bpe_merges = merges_handle.read().split("\n")[1:-1]
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
+ self.cache = {}
+ self.add_prefix_space = add_prefix_space
+
+ # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
+ self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
+
+ super().__init__(
+ errors=errors,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ pad_token=pad_token,
+ add_prefix_space=add_prefix_space,
+ add_bos_token=add_bos_token,
+ add_eos_token=add_eos_token,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ return len(self.encoder)
+
+ @property
+ def normalizer(self):
+ if self._normalizer is None:
+ self._normalizer = EnglishNormalizer()
+ return self._normalizer
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
+ def bpe(self, token):
+ if token in self.cache:
+ return self.cache[token]
+ word = tuple(token)
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token
+
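+        # Greedily apply the lowest-ranked (earliest learned) merge among the current adjacent pairs until no
+        # known merge remains or the word collapses into a single symbol.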
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ self.cache[token] = word
+ return word
+
+ # Copied from transformers.models.llama.tokenization_llama.LlamaTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ bos_token_id = [self.bos_token_id] if self.add_bos_token else []
+ eos_token_id = [self.eos_token_id] if self.add_eos_token else []
+
+ output = bos_token_id + token_ids_0 + eos_token_id
+
+ if token_ids_1 is not None:
+ output = output + bos_token_id + token_ids_1 + eos_token_id
+
+ return output
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if not self.add_bos_token:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=False
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0))
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
+
+ def _tokenize(self, text):
+ """Tokenize a string."""
+ bpe_tokens = []
+ text = self.normalizer(text)
+ for token in re.findall(self.pat, text):
+ token = "".join(
+ self.byte_encoder[b] for b in token.encode("utf-8")
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
+
+            # if the token is "Ġ" we replace it with "[SPACE]" (if "[SPACE]" is present in the vocab), otherwise we keep the "Ġ".
+ bpe_tokens.extend(
+ "[SPACE]" if bpe_token == "\u0120" and "[SPACE]" in self.encoder.keys() else bpe_token
+ for bpe_token in self.bpe(token).split(" ")
+ )
+
+ return bpe_tokens
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index)
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ text = "".join(tokens)
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
+ return text
+
+ def clean_up_tokenization(self, text):
+ text = "".join(text)
+ vocab_tokens = list(self.encoder.keys()) + list(self.added_tokens_encoder.keys())
+
+ text = text.replace("[SPACE]", " ") if "[SPACE]" in vocab_tokens else text
+ text = text.replace("[STOP]", " ") if "[STOP]" in vocab_tokens else text
+
+        text = text.replace(self.unk_token, "").replace("   ", " ").replace("  ", " ")
+ return text
+
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ writer.write("#version: 0.2\n")
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
diff --git a/venv/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0de4a00bd15005fe974f7240b9bc6c940f5b789
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/groupvit/__init__.py
@@ -0,0 +1,97 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_groupvit": [
+ "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "GroupViTConfig",
+ "GroupViTOnnxConfig",
+ "GroupViTTextConfig",
+ "GroupViTVisionConfig",
+ ],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_groupvit"] = [
+ "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "GroupViTModel",
+ "GroupViTPreTrainedModel",
+ "GroupViTTextModel",
+ "GroupViTVisionModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_groupvit"] = [
+ "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFGroupViTModel",
+ "TFGroupViTPreTrainedModel",
+ "TFGroupViTTextModel",
+ "TFGroupViTVisionModel",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_groupvit import (
+ GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ GroupViTConfig,
+ GroupViTOnnxConfig,
+ GroupViTTextConfig,
+ GroupViTVisionConfig,
+ )
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_groupvit import (
+ GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ GroupViTModel,
+ GroupViTPreTrainedModel,
+ GroupViTTextModel,
+ GroupViTVisionModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_groupvit import (
+ TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFGroupViTModel,
+ TFGroupViTPreTrainedModel,
+ TFGroupViTTextModel,
+ TFGroupViTVisionModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py b/venv/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c46c277f3519eda12087364fe542040f40edab9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/groupvit/configuration_groupvit.py
@@ -0,0 +1,452 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" GroupViT model configuration"""
+
+import os
+from collections import OrderedDict
+from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+if TYPE_CHECKING:
+ from ...processing_utils import ProcessorMixin
+ from ...utils import TensorType
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class GroupViTTextConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GroupViTTextModel`]. It is used to instantiate a
+ GroupViT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the GroupViT
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 49408):
+ Vocabulary size of the GroupViT text model. Defines the number of different tokens that can be represented
+ by the `input_ids` passed when calling [`GroupViTModel`].
+ hidden_size (`int`, *optional*, defaults to 256):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 1024):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 4):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ max_position_embeddings (`int`, *optional*, defaults to 77):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+
+ Example:
+
+ ```python
+ >>> from transformers import GroupViTTextConfig, GroupViTTextModel
+
+ >>> # Initializing a GroupViTTextModel with nvidia/groupvit-gcc-yfcc style configuration
+ >>> configuration = GroupViTTextConfig()
+
+ >>> model = GroupViTTextModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "groupvit_text_model"
+
+ def __init__(
+ self,
+ vocab_size=49408,
+ hidden_size=256,
+ intermediate_size=1024,
+ num_hidden_layers=12,
+ num_attention_heads=4,
+ max_position_embeddings=77,
+ hidden_act="quick_gelu",
+ layer_norm_eps=1e-5,
+ dropout=0.0,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ pad_token_id=1,
+ bos_token_id=49406,
+ eos_token_id=49407,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.dropout = dropout
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.layer_norm_eps = layer_norm_eps
+ self.hidden_act = hidden_act
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.attention_dropout = attention_dropout
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the text config dict if we are loading from GroupViTConfig
+ if config_dict.get("model_type") == "groupvit":
+ config_dict = config_dict["text_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class GroupViTVisionConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`GroupViTVisionModel`]. It is used to instantiate
+ a GroupViT model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the GroupViT
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ hidden_size (`int`, *optional*, defaults to 384):
+ Dimensionality of the encoder layers and the pooler layer.
+ intermediate_size (`int`, *optional*, defaults to 1536):
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ depths (`List[int]`, *optional*, defaults to [6, 3, 3]):
+ The number of layers in each encoder block.
+ num_group_tokens (`List[int]`, *optional*, defaults to [64, 8, 0]):
+ The number of group tokens for each stage; 0 means the stage has no group tokens.
+ num_output_groups (`List[int]`, *optional*, defaults to [64, 8, 8]):
+ The number of output groups for each stage.
+ num_attention_heads (`int`, *optional*, defaults to 6):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ image_size (`int`, *optional*, defaults to 224):
+ The size (resolution) of each image.
+ patch_size (`int`, *optional*, defaults to 16):
+ The size (resolution) of each patch.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-5):
+ The epsilon used by the layer normalization layers.
+ dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ initializer_factor (`float`, *optional*, defaults to 1.0):
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
+ testing).
+
+ Example:
+
+ ```python
+ >>> from transformers import GroupViTVisionConfig, GroupViTVisionModel
+
+ >>> # Initializing a GroupViTVisionModel with nvidia/groupvit-gcc-yfcc style configuration
+ >>> configuration = GroupViTVisionConfig()
+
+ >>> model = GroupViTVisionModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "groupvit_vision_model"
+
+ def __init__(
+ self,
+ hidden_size=384,
+ intermediate_size=1536,
+ depths=[6, 3, 3],
+ num_hidden_layers=12,
+ num_group_tokens=[64, 8, 0],
+ num_output_groups=[64, 8, 8],
+ num_attention_heads=6,
+ image_size=224,
+ patch_size=16,
+ num_channels=3,
+ hidden_act="gelu",
+ layer_norm_eps=1e-5,
+ dropout=0.0,
+ attention_dropout=0.0,
+ initializer_range=0.02,
+ initializer_factor=1.0,
+ assign_eps=1.0,
+ assign_mlp_ratio=[0.5, 4],
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+
+ self.hidden_size = hidden_size
+ self.intermediate_size = intermediate_size
+ self.depths = depths
+ if num_hidden_layers != sum(depths):
+ logger.warning(
+ f"Manually setting num_hidden_layers to {num_hidden_layers}, but we expect num_hidden_layers ="
+ f" sum(depth) = {sum(depths)}"
+ )
+ self.num_hidden_layers = num_hidden_layers
+ self.num_group_tokens = num_group_tokens
+ self.num_output_groups = num_output_groups
+ self.num_attention_heads = num_attention_heads
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_channels = num_channels
+ self.hidden_act = hidden_act
+ self.layer_norm_eps = layer_norm_eps
+ self.dropout = dropout
+ self.attention_dropout = attention_dropout
+ self.initializer_range = initializer_range
+ self.initializer_factor = initializer_factor
+ self.assign_eps = assign_eps
+ self.assign_mlp_ratio = assign_mlp_ratio
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ # get the vision config dict if we are loading from GroupViTConfig
+ if config_dict.get("model_type") == "groupvit":
+ config_dict = config_dict["vision_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class GroupViTConfig(PretrainedConfig):
+ r"""
+ [`GroupViTConfig`] is the configuration class to store the configuration of a [`GroupViTModel`]. It is used to
+ instantiate a GroupViT model according to the specified arguments, defining the text model and vision model
+ configs. Instantiating a configuration with the defaults will yield a similar configuration to that of the GroupViT
+ [nvidia/groupvit-gcc-yfcc](https://huggingface.co/nvidia/groupvit-gcc-yfcc) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ text_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`GroupViTTextConfig`].
+ vision_config (`dict`, *optional*):
+ Dictionary of configuration options used to initialize [`GroupViTVisionConfig`].
+ projection_dim (`int`, *optional*, defaults to 256):
+ Dimensionality of the text and vision projection layers.
+ projection_intermediate_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the intermediate layer of the text and vision projection layers.
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
+ The initial value of the *logit_scale* parameter. Default is used as per the original GroupViT
+ implementation.
+ kwargs (*optional*):
+ Dictionary of keyword arguments.
+ """
+
+ model_type = "groupvit"
+
+ def __init__(
+ self,
+ text_config=None,
+ vision_config=None,
+ projection_dim=256,
+ projection_intermediate_dim=4096,
+ logit_scale_init_value=2.6592,
+ **kwargs,
+ ):
+ # If `_config_dict`s exist, we use them for backward compatibility.
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
+ # of confusion!).
+ text_config_dict = kwargs.pop("text_config_dict", None)
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
+
+ super().__init__(**kwargs)
+
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
+ if text_config_dict is not None:
+ if text_config is None:
+ text_config = {}
+
+ # This is the complete result when using `text_config_dict`.
+ _text_config_dict = GroupViTTextConfig(**text_config_dict).to_dict()
+
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but are different.
+ for key, value in _text_config_dict.items():
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ # If specified in `text_config_dict`
+ if key in text_config_dict:
+ message = (
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
+ f'The value `text_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`text_config_dict` is provided which will be used to initialize `GroupViTTextConfig`. "
+ f'The value `text_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
+ text_config.update(_text_config_dict)
+
+ if vision_config_dict is not None:
+ if vision_config is None:
+ vision_config = {}
+
+ # This is the complete result when using `vision_config_dict`.
+ _vision_config_dict = GroupViTVisionConfig(**vision_config_dict).to_dict()
+ # convert keys to string instead of integer
+ if "id2label" in _vision_config_dict:
+ _vision_config_dict["id2label"] = {
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
+ }
+
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but are different.
+ for key, value in _vision_config_dict.items():
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ # If specified in `vision_config_dict`
+ if key in vision_config_dict:
+ message = (
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
+ )
+ # If inferred from default argument values (just to be super careful)
+ else:
+ message = (
+ f"`vision_config_dict` is provided which will be used to initialize `GroupViTVisionConfig`."
+ f' The value `vision_config["{key}"]` will be overridden.'
+ )
+ logger.info(message)
+
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
+ vision_config.update(_vision_config_dict)
+
+ if text_config is None:
+ text_config = {}
+ logger.info("`text_config` is `None`. Initializing the `GroupViTTextConfig` with default values.")
+
+ if vision_config is None:
+ vision_config = {}
+ logger.info("`vision_config` is `None`. initializing the `GroupViTVisionConfig` with default values.")
+
+ self.text_config = GroupViTTextConfig(**text_config)
+ self.vision_config = GroupViTVisionConfig(**vision_config)
+
+ self.projection_dim = projection_dim
+ self.projection_intermediate_dim = projection_intermediate_dim
+ self.logit_scale_init_value = logit_scale_init_value
+ self.initializer_range = 0.02
+ self.initializer_factor = 1.0
+ self.output_segmentation = False
+
+ @classmethod
+ def from_text_vision_configs(cls, text_config: GroupViTTextConfig, vision_config: GroupViTVisionConfig, **kwargs):
+ r"""
+ Instantiate a [`GroupViTConfig`] (or a derived class) from groupvit text model configuration and groupvit
+ vision model configuration.
+
+ Returns:
+ [`GroupViTConfig`]: An instance of a configuration object
+ """
+
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
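+
+# Illustrative usage (not part of the library): compose a full config from the two
+# sub-configs defined above, e.g.
+#
+#     >>> text_config = GroupViTTextConfig()
+#     >>> vision_config = GroupViTVisionConfig()
+#     >>> config = GroupViTConfig.from_text_vision_configs(text_config, vision_config)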
+
+
+class GroupViTOnnxConfig(OnnxConfig):
+ @property
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("input_ids", {0: "batch", 1: "sequence"}),
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
+ ("attention_mask", {0: "batch", 1: "sequence"}),
+ ]
+ )
+
+ @property
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
+ return OrderedDict(
+ [
+ ("logits_per_image", {0: "batch"}),
+ ("logits_per_text", {0: "batch"}),
+ ("text_embeds", {0: "batch"}),
+ ("image_embeds", {0: "batch"}),
+ ]
+ )
+
+ @property
+ def atol_for_validation(self) -> float:
+ return 1e-4
+
+ def generate_dummy_inputs(
+ self,
+ processor: "ProcessorMixin",
+ batch_size: int = -1,
+ seq_length: int = -1,
+ framework: Optional["TensorType"] = None,
+ ) -> Mapping[str, Any]:
+ text_input_dict = super().generate_dummy_inputs(
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
+ )
+ image_input_dict = super().generate_dummy_inputs(
+ processor.image_processor, batch_size=batch_size, framework=framework
+ )
+ return {**text_input_dict, **image_input_dict}
+
+ @property
+ def default_onnx_opset(self) -> int:
+ return 14
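+
+# Note (illustrative, not part of the library): `generate_dummy_inputs` above merges the
+# tokenizer and image-processor dummy inputs, so an ONNX export receives both
+# `input_ids`/`attention_mask` and `pixel_values` in a single mapping.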
diff --git a/venv/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py b/venv/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec383b0fcfa6cb3951db90fd8cea5be5936518e3
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/groupvit/modeling_groupvit.py
@@ -0,0 +1,1584 @@
+# coding=utf-8
+# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch GroupViT model."""
+
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import numpy as np
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
+
+
+from ..deprecated._archive_maps import GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# contrastive loss function, adapted from
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
+def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
+
+
+# Copied from transformers.models.clip.modeling_clip.clip_loss with clip->groupvit
+def groupvit_loss(similarity: torch.Tensor) -> torch.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(similarity.t())
+ return (caption_loss + image_loss) / 2.0
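+
+# Illustrative usage (not part of the library): `groupvit_loss` expects a square
+# image-text similarity matrix whose matching pairs lie on the diagonal, e.g.
+#
+#     >>> sim = torch.randn(4, 4)  # hypothetical logits_per_text for 4 pairs
+#     >>> loss = groupvit_loss(sim)  # scalar: mean of the two cross-entropies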
+
+
+def hard_softmax(logits: torch.Tensor, dim: int):
+ y_soft = logits.softmax(dim)
+ # Straight through.
+ index = y_soft.max(dim, keepdim=True)[1]
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
+ ret = y_hard - y_soft.detach() + y_soft
+
+ return ret
+
+
+def gumbel_softmax(logits: torch.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> torch.Tensor:
+ # more stable https://github.com/pytorch/pytorch/issues/41663
+ gumbel_dist = torch.distributions.gumbel.Gumbel(
+ torch.tensor(0.0, device=logits.device, dtype=logits.dtype),
+ torch.tensor(1.0, device=logits.device, dtype=logits.dtype),
+ )
+ gumbels = gumbel_dist.sample(logits.shape)
+
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
+ y_soft = gumbels.softmax(dim)
+
+ if hard:
+ # Straight through.
+ index = y_soft.max(dim, keepdim=True)[1]
+ y_hard = torch.zeros_like(logits, memory_format=torch.legacy_contiguous_format).scatter_(dim, index, 1.0)
+ ret = y_hard - y_soft.detach() + y_soft
+ else:
+ # Reparametrization trick.
+ ret = y_soft
+ return ret
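+
+# Illustrative usage (not part of the library): the straight-through trick above returns
+# hard one-hot assignments in the forward pass while backpropagating through the soft
+# probabilities, e.g.
+#
+#     >>> logits = torch.randn(2, 4, 8)  # hypothetical (batch, groups, tokens) scores
+#     >>> hard = gumbel_softmax(logits, tau=1.0, hard=True, dim=-2)
+#     >>> hard.sum(dim=-2)  # each token is assigned to exactly one group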
+
+
+def resize_attention_map(attentions, height, width, align_corners=False):
+ """
+ Args:
+ attentions (`torch.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
+ height (`int`): height of the output attention map
+ width (`int`): width of the output attention map
+ align_corners (`bool`, *optional*): the `align_corner` argument for `nn.functional.interpolate`.
+
+ Returns:
+ `torch.Tensor`: resized attention map of shape [batch_size, groups, height, width]
+ """
+
+ scale = (height * width // attentions.shape[2]) ** 0.5
+ if height > width:
+ feat_width = int(np.round(width / scale))
+ feat_height = attentions.shape[2] // feat_width
+ else:
+ feat_height = int(np.round(height / scale))
+ feat_width = attentions.shape[2] // feat_height
+
+ batch_size = attentions.shape[0]
+ groups = attentions.shape[1] # number of group token
+ # [batch_size, groups, feat_height*feat_width] -> [batch_size, groups, feat_height, feat_width]
+ attentions = attentions.reshape(batch_size, groups, feat_height, feat_width)
+ attentions = nn.functional.interpolate(
+ attentions, size=(height, width), mode="bilinear", align_corners=align_corners
+ )
+ return attentions
+
+
+def get_grouping_from_attentions(attentions, hw_shape):
+ """
+ Args:
+ attentions (`tuple(torch.FloatTensor)`): tuple of attention maps returned by `GroupViTVisionTransformer`
+ hw_shape (`tuple(int)`): height and width of the output attention map
+ Returns:
+ `torch.Tensor`: the attention map of shape [batch_size, groups, height, width]
+ """
+
+ attn_maps = []
+ with torch.no_grad():
+ prev_attn_masks = None
+ for attn_masks in attentions:
+ # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
+ attn_masks = attn_masks.permute(0, 2, 1).contiguous()
+ if prev_attn_masks is None:
+ prev_attn_masks = attn_masks
+ else:
+ prev_attn_masks = prev_attn_masks @ attn_masks
+ # [batch_size, heightxwidth, num_groups] -> [batch_size, num_groups, heightxwidth] -> [batch_size, num_groups, height, width]
+ cur_attn_map = resize_attention_map(prev_attn_masks.permute(0, 2, 1).contiguous(), *hw_shape)
+ attn_maps.append(cur_attn_map)
+
+ # [batch_size, num_groups, height, width]
+ final_grouping = attn_maps[-1]
+
+ return final_grouping
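+
+# Note (illustrative, not part of the library): chaining the per-stage assignment matrices
+# above composes the patch -> group mappings across stages, so the final map assigns every
+# input patch location to one of the last stage's output groups before being resized to
+# `hw_shape`.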
+
+
+class GroupViTCrossAttentionLayer(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+ self.attn = GroupViTAttention(config)
+ self.norm2 = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.mlp = GroupViTMLP(config)
+ self.norm_post = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ def forward(self, query, key):
+ x = query
+ x = x + self.attn(query, encoder_hidden_states=key)[0]
+ x = x + self.mlp(self.norm2(x))
+ x = self.norm_post(x)
+ return x
+
+
+class GroupViTAssignAttention(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+ self.scale = config.hidden_size**-0.5
+
+ self.q_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.k_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.v_proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.proj = nn.Linear(config.hidden_size, config.hidden_size)
+ self.assign_eps = config.assign_eps
+
+ def get_attn(self, attn, gumbel=True, hard=True):
+ if gumbel and self.training:
+ attn = gumbel_softmax(attn, dim=-2, hard=hard)
+ else:
+ if hard:
+ attn = hard_softmax(attn, dim=-2)
+ else:
+ attn = nn.functional.softmax(attn, dim=-2)
+
+ return attn
+
+ def forward(self, query, key):
+ value = key
+ # [batch_size, query_length, channels]
+ query = self.q_proj(query)
+
+ # [batch_size, key_length, channels]
+ key = self.k_proj(key)
+
+ # [batch_size, key_length, channels]
+ value = self.v_proj(value)
+
+ # [batch_size, query_length, key_length]
+ raw_attn = (query @ key.transpose(-2, -1)) * self.scale
+
+ attn = self.get_attn(raw_attn)
+ soft_attn = self.get_attn(raw_attn, gumbel=False, hard=False)
+
+ attn = attn / (attn.sum(dim=-1, keepdim=True) + self.assign_eps)
+
+ out = attn @ value
+
+ out = self.proj(out)
+
+ return out, soft_attn
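+
+# Note (illustrative, not part of the library): `get_attn` above normalizes over the
+# group/query dimension (dim=-2), so each image token distributes its mass across groups;
+# the subsequent division by `attn.sum(dim=-1)` renormalizes each group's row before the
+# weighted sum over `value`.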
+
+
+class GroupViTTokenAssign(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig, num_group_token, num_output_group):
+ super().__init__()
+ self.num_output_group = num_output_group
+ # norm on group_tokens
+ self.norm_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ assign_mlp_ratio = (
+ config.assign_mlp_ratio
+ if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
+ else (config.assign_mlp_ratio, config.assign_mlp_ratio)
+ )
+ tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
+ self.mlp_inter = GroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group)
+ self.norm_post_tokens = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ # norm on x
+ self.norm_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.pre_assign_attn = GroupViTCrossAttentionLayer(config)
+
+ self.assign = GroupViTAssignAttention(config)
+ self.norm_new_x = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.mlp_channels = GroupViTMLP(config, config.hidden_size, channels_dim, config.hidden_size)
+
+ def project_group_token(self, group_tokens):
+ """
+ Args:
+ group_tokens (torch.Tensor): group tokens, [batch_size, num_group_tokens, channels]
+
+ Returns:
+ projected_group_tokens (torch.Tensor): [batch_size, num_output_groups, channels]
+ """
+ # [B, num_output_groups, C] <- [B, num_group_tokens, C]
+ projected_group_tokens = self.mlp_inter(group_tokens)
+ projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
+ return projected_group_tokens
+
+ def forward(self, image_tokens, group_tokens):
+ """
+ Args:
+ image_tokens (`torch.Tensor`): image tokens, of shape [batch_size, input_length, channels]
+ group_tokens (`torch.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
+ """
+
+ group_tokens = self.norm_tokens(group_tokens)
+ image_tokens = self.norm_x(image_tokens)
+ # [batch_size, num_output_groups, channels]
+ projected_group_tokens = self.project_group_token(group_tokens)
+ projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
+ new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
+ new_image_tokens += projected_group_tokens
+
+ new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
+
+ return new_image_tokens, attention
+
+
+@dataclass
+class GroupViTModelOutput(ModelOutput):
+ """
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+ segmentation_logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
+ to avoid doing two interpolations and losing some quality when a user needs to resize the logits to the
+ original image size as post-processing. You should always check your logits shape and resize as needed.
+
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of
+ [`GroupViTTextModel`].
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output of
+ [`GroupViTVisionModel`].
+ text_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`GroupViTTextModel`].
+ vision_model_output (`BaseModelOutputWithPooling`):
+ The output of the [`GroupViTVisionModel`].
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits_per_image: torch.FloatTensor = None
+ logits_per_text: torch.FloatTensor = None
+ segmentation_logits: torch.FloatTensor = None
+ text_embeds: torch.FloatTensor = None
+ image_embeds: torch.FloatTensor = None
+ text_model_output: BaseModelOutputWithPooling = None
+ vision_model_output: BaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+class GroupViTPatchEmbeddings(nn.Module):
+ """
+ Image to Patch Embedding.
+ """
+
+ def __init__(
+ self,
+ image_size: int = 224,
+ patch_size: Union[int, Tuple[int, int]] = 16,
+ num_channels: int = 3,
+ embed_dim: int = 768,
+ ):
+ super().__init__()
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+
+ self.projection = nn.Conv2d(num_channels, embed_dim, kernel_size=patch_size, stride=patch_size)
+
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if not interpolate_pos_encoding:
+ if height != self.image_size[0] or width != self.image_size[1]:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model"
+ f" ({self.image_size[0]}*{self.image_size[1]})."
+ )
+ x = self.projection(pixel_values).flatten(2).transpose(1, 2)
+ return x
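+
+# Shape sketch (illustrative, not part of the library): with the default vision config
+# (image_size=224, patch_size=16, hidden_size=384), the projection maps a
+# (batch_size, 3, 224, 224) image to (batch_size, (224 // 16) ** 2, 384),
+# i.e. 196 patch tokens per image.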
+
+
+class GroupViTVisionEmbeddings(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+
+ self.patch_embeddings = GroupViTPatchEmbeddings(
+ image_size=config.image_size,
+ patch_size=config.patch_size,
+ num_channels=config.num_channels,
+ embed_dim=config.hidden_size,
+ )
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches, config.hidden_size))
+ self.dropout = nn.Dropout(config.dropout)
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.config = config
+
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method allows interpolating the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ npatch = embeddings.shape[1]
+ if npatch == self.position_embeddings.shape[1] and height == width:
+ return self.position_embeddings
+ patch_pos_embed = self.position_embeddings
+ num_original_pos_embed = patch_pos_embed.shape[1]
+ dim = embeddings.shape[-1]
+ feat_height = height // self.config.patch_size
+ feat_width = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ feat_height, feat_width = feat_height + 0.1, feat_width + 0.1
+ original_height = original_width = math.sqrt(num_original_pos_embed)
+ reshaped_patch_pos_embed = patch_pos_embed.reshape(1, int(original_height), int(original_width), dim).permute(
+ 0, 3, 1, 2
+ )
+ scale_factor = (feat_height / original_height, feat_width / original_width)
+ patch_pos_embed = nn.functional.interpolate(
+ reshaped_patch_pos_embed,
+ scale_factor=scale_factor,
+ mode="bicubic",
+ align_corners=False,
+ )
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
+ return patch_pos_embed
+
+ def forward(self, pixel_values: torch.Tensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+
+ embeddings = self.layernorm(embeddings)
+
+ batch_size, seq_len, _ = embeddings.size()
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->GroupViT
+class GroupViTTextEmbeddings(nn.Module):
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__()
+ embed_dim = config.hidden_size
+
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
+
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ ) -> torch.Tensor:
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.token_embedding(input_ids)
+
+ position_embeddings = self.position_embedding(position_ids)
+ embeddings = inputs_embeds + position_embeddings
+
+ return embeddings
+
+
+class GroupViTStage(nn.Module):
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
+
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ depth: int,
+ num_prev_group_token: int,
+ num_group_token: int,
+ num_output_group: int,
+ ):
+ super().__init__()
+ self.depth = depth
+ self.num_group_token = num_group_token
+ if num_group_token > 0:
+ self.group_token = nn.Parameter(torch.zeros(1, num_group_token, config.hidden_size))
+ else:
+ self.group_token = None
+ self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(depth)])
+
+ if num_group_token > 0:
+ self.downsample = GroupViTTokenAssign(
+ config=config,
+ num_group_token=num_group_token,
+ num_output_group=num_output_group,
+ )
+ else:
+ self.downsample = None
+
+ if num_prev_group_token > 0 and num_group_token > 0:
+ self.group_projector = nn.Sequential(
+ nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps),
+ GroupViTMixerMLP(config, num_prev_group_token, config.hidden_size // 2, num_group_token),
+ )
+ else:
+ self.group_projector = None
+
+ @property
+ def with_group_token(self):
+ return self.group_token is not None
+
+ def split_x(self, x):
+ if self.with_group_token:
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
+ else:
+ return x, None
+
+ def concat_x(self, x: torch.Tensor, group_token: Optional[torch.Tensor] = None) -> torch.Tensor:
+ if group_token is None:
+ return x
+ return torch.cat([x, group_token], dim=1)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ prev_group_token: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ prev_group_token (`torch.FloatTensor`, *optional*): group tokens from the previous stage, of shape
+ `(batch, num_prev_group_token, embed_dim)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the grouping tensors of Grouping block.
+ """
+ if self.with_group_token:
+ group_token = self.group_token.expand(hidden_states.size(0), -1, -1)
+ if self.group_projector is not None:
+ group_token = group_token + self.group_projector(prev_group_token)
+ else:
+ group_token = None
+
+ x = hidden_states
+
+ cat_x = self.concat_x(x, group_token)
+ for layer in self.layers:
+ layer_out = layer(cat_x, attention_mask=None, causal_attention_mask=None)
+ cat_x = layer_out[0]
+
+ x, group_token = self.split_x(cat_x)
+
+ attention = None
+ if self.downsample is not None:
+ x, attention = self.downsample(x, group_token)
+
+ outputs = (x, group_token)
+ if output_attentions:
+ outputs = outputs + (attention,)
+
+ return outputs
+
+
+class GroupViTMLP(nn.Module):
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ hidden_size: Optional[int] = None,
+ intermediate_size: Optional[int] = None,
+ output_size: Optional[int] = None,
+ ):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
+ intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
+ output_size = output_size if output_size is not None else hidden_size
+ self.fc1 = nn.Linear(hidden_size, intermediate_size)
+ self.fc2 = nn.Linear(intermediate_size, output_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+class GroupViTMixerMLP(GroupViTMLP):
+ def forward(self, x):
+ x = super().forward(x.transpose(1, 2))
+ return x.transpose(1, 2)
+
+
+class GroupViTAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+ is_cross_attention = encoder_hidden_states is not None
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ if is_cross_attention:
+ key_states = self._shape(self.k_proj(encoder_hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(encoder_hidden_states), -1, bsz)
+ else:
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {causal_attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights has to be reshaped
+ # twice and has to be reused in the following computation
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->GroupViT
+class GroupViTEncoderLayer(nn.Module):
+ def __init__(self, config: GroupViTConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = GroupViTAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = GroupViTMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ causal_attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+class GroupViTPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GroupViTConfig
+ base_model_prefix = "groupvit"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+
+ init_range = self.config.initializer_range
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=init_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ factor = self.config.initializer_factor
+ if isinstance(module, GroupViTTextEmbeddings):
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
+ elif isinstance(module, GroupViTAttention):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ out_proj_std = (module.embed_dim**-0.5) * factor
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
+ elif isinstance(module, GroupViTMLP):
+ factor = self.config.initializer_factor
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
+ nn.init.normal_(module.fc1.weight, std=fc_std)
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
+
+
+GROUPVIT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+GROUPVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`CLIPTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+class GroupViTVisionEncoder(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig) -> None:
+ super().__init__()
+ self.config = config
+ self.stages = nn.ModuleList(
+ [
+ GroupViTStage(
+ config=config,
+ depth=config.depths[i],
+ num_group_token=config.num_group_tokens[i],
+ num_output_group=config.num_output_groups[i],
+ num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
+ )
+ for i in range(len(config.depths))
+ ]
+ )
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[tuple, BaseModelOutput]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ all_hidden_states = () if output_hidden_states else None
+ all_groupings = () if output_attentions else None
+
+ group_tokens = None
+
+ for i, stage in enumerate(self.stages):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = stage(hidden_states, group_tokens, output_attentions)
+
+ hidden_states = layer_outputs[0]
+ group_tokens = layer_outputs[1]
+
+ if output_attentions and layer_outputs[2] is not None:
+ all_groupings = all_groupings + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
+ )
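+
+# Shape sketch (illustrative, not part of the library): with the default vision config
+# (depths=[6, 3, 3], num_group_tokens=[64, 8, 0], num_output_groups=[64, 8, 8]), the 196
+# patch tokens are grouped into 64 tokens after stage 0 and 8 tokens after stage 1;
+# stage 2 has no group tokens and only applies its encoder layers.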
+
+
+class GroupViTTextEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self-attention layers. Each layer is a
+ [`GroupViTEncoderLayer`].
+
+ Args:
+ config: GroupViTTextConfig
+ """
+
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([GroupViTEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder, CLIP_TEXT->GROUPVIT_TEXT
+class GroupViTTextTransformer(nn.Module):
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+ self.embeddings = GroupViTTextEmbeddings(config)
+ self.encoder = GroupViTTextEncoder(config)
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ # For `pooled_output` computation
+ self.eos_token_id = config.eos_token_id
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is None:
+ raise ValueError("You have to specify input_ids")
+
+ input_shape = input_ids.size()
+ input_ids = input_ids.view(-1, input_shape[-1])
+
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
+
+ # CLIP's text model uses causal mask, prepare it here.
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
+ causal_attention_mask = _create_4d_causal_attention_mask(
+ input_shape, hidden_states.dtype, device=hidden_states.device
+ )
+ # expand attention_mask
+ if attention_mask is not None:
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
+
+ if self.eos_token_id == 2:
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
+ # A CLIP model with such an `eos_token_id` in the config can't work correctly with extra new tokens added
+ # ------------------------------------------------------------
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
+ pooled_output = last_hidden_state[
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
+ ]
+ else:
+ # The config gets the updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
+ pooled_output = last_hidden_state[
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
+ # We need to get the first position of the `eos_token_id` value (`pad_token_id` might be equal to `eos_token_id`)
+ (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
+ .int()
+ .argmax(dim=-1),
+ ]
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class GroupViTTextModel(GroupViTPreTrainedModel):
+ config_class = GroupViTTextConfig
+
+ def __init__(self, config: GroupViTTextConfig):
+ super().__init__(config)
+ self.text_model = GroupViTTextTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> nn.Module:
+ return self.text_model.embeddings.token_embedding
+
+ def set_input_embeddings(self, value):
+ self.text_model.embeddings.token_embedding = value
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTTextConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, GroupViTTextModel
+
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = GroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
+ ```"""
+ return self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+class GroupViTVisionTransformer(nn.Module):
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = GroupViTVisionEmbeddings(config)
+ self.encoder = GroupViTVisionEncoder(config)
+ self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ hidden_states=hidden_states,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # normalize the last hidden state
+ last_hidden_state = self.layernorm(last_hidden_state)
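+ # mean-pool over the token dimension (the output group tokens) to obtain a single image-level representation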
+ pooled_output = last_hidden_state.mean(dim=1)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+
+class GroupViTVisionModel(GroupViTPreTrainedModel):
+ config_class = GroupViTVisionConfig
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: GroupViTVisionConfig):
+ super().__init__(config)
+ self.vision_model = GroupViTVisionTransformer(config)
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self) -> GroupViTPatchEmbeddings:
+ return self.vision_model.embeddings.patch_embeddings
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GroupViTVisionModel
+
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = GroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
+ ```"""
+ return self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+
+@add_start_docstrings(GROUPVIT_START_DOCSTRING)
+class GroupViTModel(GroupViTPreTrainedModel):
+ config_class = GroupViTConfig
+
+ def __init__(self, config: GroupViTConfig):
+ super().__init__(config)
+
+ if not isinstance(config.text_config, GroupViTTextConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
+ raise ValueError(
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.projection_intermediate_dim = config.projection_intermediate_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = GroupViTTextTransformer(text_config)
+ self.vision_model = GroupViTVisionTransformer(vision_config)
+
+ self.visual_projection = nn.Sequential(
+ nn.Linear(self.vision_embed_dim, self.projection_intermediate_dim, bias=True),
+ nn.BatchNorm1d(self.projection_intermediate_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
+ )
+ self.text_projection = nn.Sequential(
+ nn.Linear(self.text_embed_dim, self.projection_intermediate_dim, bias=True),
+ nn.BatchNorm1d(self.projection_intermediate_dim),
+ nn.ReLU(inplace=True),
+ nn.Linear(self.projection_intermediate_dim, self.projection_dim, bias=True),
+ )
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING)
+ def get_text_features(
+ self,
+ input_ids: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
+ applying the projection layer to the pooled output of [`GroupViTTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, GroupViTModel
+
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = text_outputs[1]
+ text_features = self.text_projection(pooled_output)
+
+ return text_features
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> torch.FloatTensor:
+ r"""
+ Returns:
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
+ applying the projection layer to the pooled output of [`GroupViTVisionModel`].
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GroupViTModel
+
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="pt")
+
+ >>> image_features = model.get_image_features(**inputs)
+ ```"""
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ pooled_output = vision_outputs[1] # pooled_output
+ image_features = self.visual_projection(pooled_output)
+
+ return image_features
+
+ @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=GroupViTModelOutput, config_class=GroupViTConfig)
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_segmentation: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, GroupViTModelOutput]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, GroupViTModel
+
+ >>> model = GroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
+ ... )
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
+ ```"""
+ # Use GROUPVIT model's config for some fields (if specified) instead of those of vision & text components.
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_segmentation = (
+ output_segmentation if output_segmentation is not None else self.config.output_segmentation
+ )
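+ # segmentation logits are derived from the grouping attentions, so the vision encoder must return them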
+ if output_segmentation:
+ output_attentions = True
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ image_embeds = vision_outputs[1]
+ image_embeds = self.visual_projection(image_embeds)
+
+ text_embeds = text_outputs[1]
+ text_embeds = self.text_projection(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / image_embeds.norm(dim=-1, keepdim=True)
+ text_embeds = text_embeds / text_embeds.norm(dim=-1, keepdim=True)
+
+ # cosine similarity as logits
+ logit_scale = self.logit_scale.exp()
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
+ logits_per_image = logits_per_text.t()
+
+ seg_logits = None
+ if output_segmentation:
+ # grouped features
+ # [batch_size_image, num_group, hidden_size]
+ image_group_embeds = vision_outputs[0]
+ # [batch_size_image*num_group, hidden_size]
+ image_group_embeds = self.visual_projection(image_group_embeds.reshape(-1, image_group_embeds.shape[-1]))
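+ # when hidden states are returned, they occupy index 2 of the output tuple, shifting the grouping attentions to index 3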
+ if output_hidden_states:
+ attentions = vision_outputs[3]
+ else:
+ attentions = vision_outputs[2]
+ # [batch_size_image, num_group, height, width]
+ grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
+
+ # normalized features
+ image_group_embeds = image_group_embeds / image_group_embeds.norm(dim=-1, keepdim=True)
+ # [batch_size_image x num_group, batch_size_text]
+ logits_per_image_group = torch.matmul(image_group_embeds, text_embeds.t()) * logit_scale
+ # [batch_size_image, batch_size_text, num_group]
+ logits_per_image_group = logits_per_image_group.reshape(
+ image_embeds.shape[0], -1, text_embeds.shape[0]
+ ).permute(0, 2, 1)
+
+ # [batch_size_image, num_group, height x width]
+ flatten_grouping = grouping.reshape(grouping.shape[0], grouping.shape[1], -1)
+
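+ # each pixel's segmentation logit is the sum over groups of the group-text similarity weighted by how strongly the pixel is assigned to that group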
+ # [batch_size_image, batch_size_text, height, width]
+ seg_logits = torch.matmul(logits_per_image_group, flatten_grouping) * logit_scale
+ seg_logits = seg_logits.reshape(
+ seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3]
+ )
+
+ loss = None
+ if return_loss:
+ loss = groupvit_loss(logits_per_text)
+
+ if not return_dict:
+ if seg_logits is not None:
+ output = (
+ logits_per_image,
+ logits_per_text,
+ seg_logits,
+ text_embeds,
+ image_embeds,
+ text_outputs,
+ vision_outputs,
+ )
+ else:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return GroupViTModelOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ segmentation_logits=seg_logits,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py b/venv/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py
new file mode 100644
index 0000000000000000000000000000000000000000..31c76083e02287f3428356d0b9b7b26522668420
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/groupvit/modeling_tf_groupvit.py
@@ -0,0 +1,2133 @@
+# coding=utf-8
+# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 GroupViT model."""
+
+
+from __future__ import annotations
+
+import collections.abc
+import math
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
+from ...modeling_tf_utils import (
+ TFModelInputType,
+ TFPreTrainedModel,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ ModelOutput,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ is_tensorflow_probability_available,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_groupvit import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+# soft dependency
+if is_tensorflow_probability_available():
+ try:
+ import tensorflow_probability as tfp
+
+ # On the first call, check whether a compatible version of TensorFlow is installed
+ # TensorFlow Probability depends on a recent stable release of TensorFlow
+ _ = tfp.distributions.Normal(loc=0.0, scale=1.0)
+ except ImportError:
+ logger.error(
+ "GroupViT models are not usable since `tensorflow_probability` can't be loaded. "
+ "It seems you have `tensorflow_probability` installed with the wrong tensorflow version."
+ "Please try to reinstall it following the instructions here: https://github.com/tensorflow/probability."
+ )
+
+_CHECKPOINT_FOR_DOC = "nvidia/groupvit-gcc-yfcc"
+
+
+from ..deprecated._archive_maps import TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+LARGE_NEGATIVE = -1e8
+
+
+# Copied from transformers.models.bart.modeling_tf_bart._expand_mask
+def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
+ """
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
+ """
+ src_len = shape_list(mask)[1]
+ tgt_len = tgt_len if tgt_len is not None else src_len
+ one_cst = tf.constant(1.0)
+ mask = tf.cast(mask, dtype=one_cst.dtype)
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
+
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
+
+
+# contrastive loss function, adapted from
+# https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
+def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
+ return tf.math.reduce_mean(
+ keras.metrics.sparse_categorical_crossentropy(
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
+ )
+ )
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.clip_loss with clip->groupvit
+def groupvit_loss(similarity: tf.Tensor) -> tf.Tensor:
+ caption_loss = contrastive_loss(similarity)
+ image_loss = contrastive_loss(tf.transpose(similarity))
+ return (caption_loss + image_loss) / 2.0
+
+
+def hard_softmax(logits: tf.Tensor, dim: int) -> tf.Tensor:
+ y_soft = stable_softmax(logits, dim)
+ # Straight through.
+ index = tf.argmax(y_soft, dim)
+ y_hard = tf.one_hot(
+ index,
+ depth=shape_list(logits)[dim],
+ # `tf.one_hot` expects a non-negative axis (or -1), so a negative `dim` such as -2 is converted
+ # to its positive index here.
+ axis=range(len(shape_list(logits)))[dim],
+ dtype=y_soft.dtype,
+ )
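+ # the forward pass uses the one-hot `y_hard`; gradients flow through `y_soft` because `y_hard - stop_gradient(y_soft)` contributes zero gradient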
+ ret = y_hard - tf.stop_gradient(y_soft) + y_soft
+
+ return ret
+
+
+def gumbel_softmax(logits: tf.Tensor, tau: float = 1, hard: bool = False, dim: int = -1) -> tf.Tensor:
+ gumbel_dist = tfp.distributions.Gumbel(0.0, 1.0)
+ gumbels = gumbel_dist.sample(tf.shape(logits), dtype=logits.dtype)
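+ # adding Gumbel(0, 1) noise to the logits and dividing by the temperature `tau` draws a sample
+ # from the relaxed (Gumbel-softmax) categorical distribution once the softmax below is applied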
+
+ gumbels = (logits + gumbels) / tau # ~Gumbel(logits,tau)
+ y_soft = stable_softmax(gumbels, dim)
+
+ if hard:
+ # Straight through.
+ index = tf.argmax(y_soft, dim)
+ y_hard = tf.one_hot(
+ index,
+ depth=shape_list(logits)[dim],
+ # `tf.one_hot` expects a non-negative axis (or -1), so a negative `dim` such as -2 is converted
+ # to its positive index here.
+ axis=range(len(shape_list(logits)))[dim],
+ dtype=y_soft.dtype,
+ )
+ ret = y_hard - tf.stop_gradient(y_soft) + y_soft
+ else:
+ # Reparametrization trick.
+ ret = y_soft
+ return ret
+
+
+def resize_attention_map(attentions: tf.Tensor, height: int, width: int, align_corners: bool = False) -> tf.Tensor:
+ """
+ Args:
+ attentions (`tf.Tensor`): attention map of shape [batch_size, groups, feat_height*feat_width]
+ height (`int`): height of the output attention map
+ width (`int`): width of the output attention map
+ align_corners (`bool`, *optional*): the `align_corners` argument used when resizing (matching `nn.functional.interpolate` in the PyTorch implementation).
+
+ Returns:
+ `tf.Tensor`: resized attention map of shape [batch_size, groups, height, width]
+ """
+
+ scale = (height * width // attentions.shape[2]) ** 0.5
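+ # `scale` is the per-axis upsampling factor from the flattened feature map (attentions.shape[2] tokens) to the target height x width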
+ if height > width:
+ feat_width = int(np.round(width / scale))
+ feat_height = shape_list(attentions)[2] // feat_width
+ else:
+ feat_height = int(np.round(height / scale))
+ feat_width = shape_list(attentions)[2] // feat_height
+
+ batch_size = shape_list(attentions)[0]
+ groups = shape_list(attentions)[1] # number of group token
+ # [batch_size, groups, height x width, groups] -> [batch_size, groups, height, width]
+ attentions = tf.reshape(attentions, (batch_size, groups, feat_height, feat_width))
+ attentions = tf.transpose(attentions, perm=(0, 2, 3, 1))
+ if align_corners:
+ attentions = tf.compat.v1.image.resize(
+ attentions,
+ size=(height, width),
+ method="bilinear",
+ align_corners=align_corners,
+ )
+ else:
+ attentions = tf.image.resize(attentions, size=(height, width), method="bilinear")
+ attentions = tf.transpose(attentions, perm=(0, 3, 1, 2))
+ return attentions
+
+
+def get_grouping_from_attentions(attentions: Tuple[tf.Tensor], hw_shape: Tuple[int]) -> tf.Tensor:
+ """
+ Args:
+ attentions (`tuple(tf.Tensor)`): tuple of attention maps returned by `TFGroupViTVisionTransformer`
+ hw_shape (`tuple(int)`): height and width of the output attention map
+ Returns:
+ `tf.Tensor`: the attention map of shape [batch_size, groups, height, width]
+ """
+
+ attn_maps = []
+ prev_attn_masks = None
+ for attn_masks in attentions:
+ # [batch_size, num_groups, height x width] -> [batch_size, height x width, num_groups]
+ attn_masks = tf.transpose(attn_masks, perm=(0, 2, 1))
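+ # chain the soft assignments of successive grouping stages so that the final groups can be traced back to the original patches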
+ if prev_attn_masks is None:
+ prev_attn_masks = attn_masks
+ else:
+ prev_attn_masks = tf.matmul(prev_attn_masks, attn_masks)
+ # [batch_size, height x width, num_groups] -> [batch_size, num_groups, height x width] -> [batch_size, num_groups, height, width]
+ cur_attn_map = resize_attention_map(tf.transpose(prev_attn_masks, perm=(0, 2, 1)), *hw_shape)
+ attn_maps.append(cur_attn_map)
+
+ # [batch_size, num_groups, height, width]
+ final_grouping = attn_maps[-1]
+
+ return tf.stop_gradient(final_grouping)
+
+
+@dataclass
+class TFGroupViTModelOutput(ModelOutput):
+ """
+ Args:
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
+ Contrastive loss for image-text similarity.
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
+ similarity scores.
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
+ similarity scores.
+ segmentation_logits (`tf.Tensor` of shape `(batch_size, config.num_labels, logits_height, logits_width)`):
+ Classification scores for each pixel.
+
+
+
+ The logits returned do not necessarily have the same size as the `pixel_values` passed as inputs. This is
+ to avoid doing two interpolations and losing quality when a user needs to resize the logits to the
+ original image size as post-processing. You should always check your logits shape and resize as needed.
+
+
+
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
+ The text embeddings obtained by applying the projection layer to the pooled output of
+ [`TFGroupViTTextModel`].
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
+ The image embeddings obtained by applying the projection layer to the pooled output of
+ [`TFGroupViTVisionModel`].
+ text_model_output (`TFBaseModelOutputWithPooling`):
+ The output of the [`TFGroupViTTextModel`].
+ vision_model_output (`TFBaseModelOutputWithPooling`):
+ The output of the [`TFGroupViTVisionModel`].
+ """
+
+ loss: tf.Tensor | None = None
+ logits_per_image: tf.Tensor = None
+ logits_per_text: tf.Tensor = None
+ segmentation_logits: tf.Tensor = None
+ text_embeds: tf.Tensor = None
+ image_embeds: tf.Tensor = None
+ text_model_output: TFBaseModelOutputWithPooling = None
+ vision_model_output: TFBaseModelOutputWithPooling = None
+
+ def to_tuple(self) -> Tuple[Any]:
+ return tuple(
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
+ for k in self.keys()
+ )
+
+
+class TFGroupViTCrossAttentionLayer(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.attn = TFGroupViTAttention(config, name="attn")
+ self.norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm2")
+ self.mlp = TFGroupViTMLP(config, name="mlp")
+ self.norm_post = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post")
+ self.config = config
+
+ def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False) -> tf.Tensor:
+ x = query
+ x = x + self.attn(query, encoder_hidden_states=key)[0]
+ x = x + self.mlp(self.norm2(x))
+ x = self.norm_post(x)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attn", None) is not None:
+ with tf.name_scope(self.attn.name):
+ self.attn.build(None)
+ if getattr(self, "norm2", None) is not None:
+ with tf.name_scope(self.norm2.name):
+ self.norm2.build([None, None, self.config.hidden_size])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "norm_post", None) is not None:
+ with tf.name_scope(self.norm_post.name):
+ self.norm_post.build([None, None, self.config.hidden_size])
+
+
+class TFGroupViTAssignAttention(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.scale = config.hidden_size**-0.5
+
+ self.q_proj = keras.layers.Dense(config.hidden_size, name="q_proj")
+ self.k_proj = keras.layers.Dense(config.hidden_size, name="k_proj")
+ self.v_proj = keras.layers.Dense(config.hidden_size, name="v_proj")
+ self.proj = keras.layers.Dense(config.hidden_size, name="proj")
+ self.assign_eps = config.assign_eps
+ self.config = config
+
+ def get_attn(self, attn: tf.Tensor, gumbel: bool = True, hard: bool = True, training: bool = False) -> tf.Tensor:
+ if gumbel and training:
+ attn = gumbel_softmax(attn, dim=-2, hard=hard)
+ else:
+ if hard:
+ attn = hard_softmax(attn, dim=-2)
+ else:
+ attn = stable_softmax(attn, axis=-2)
+
+ return attn
+
+ def call(self, query: tf.Tensor, key: tf.Tensor, training: bool = False):
+ value = key
+ # [batch_size, query_length, channels]
+ query = self.q_proj(query)
+
+ # [batch_size, key_length, channels]
+ key = self.k_proj(key)
+
+ # [batch_size, key_length, channels]
+ value = self.v_proj(value)
+
+ # [batch_size, query_length, key_length]
+ raw_attn = tf.matmul(query, key, transpose_b=True) * self.scale
+
+ attn = self.get_attn(raw_attn, training=training)
+ soft_attn = self.get_attn(raw_attn, training=training, gumbel=False, hard=False)
+
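+ # `get_attn` normalizes over the group (query) axis; renormalize over the image-token (key) axis
+ # so each group's assignment weights sum to ~1 before aggregating the values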
+ attn = attn / (tf.math.reduce_sum(attn, axis=-1, keepdims=True) + self.assign_eps)
+
+ out = tf.matmul(attn, value)
+
+ out = self.proj(out)
+
+ return out, soft_attn
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.config.hidden_size])
+ if getattr(self, "proj", None) is not None:
+ with tf.name_scope(self.proj.name):
+ self.proj.build([None, None, self.config.hidden_size])
+
+
+class TFGroupViTTokenAssign(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, num_group_token: int, num_output_group: int, **kwargs):
+ super().__init__(**kwargs)
+ self.num_output_group = num_output_group
+ # norm on group_tokens
+ self.norm_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_tokens")
+ assign_mlp_ratio = (
+ config.assign_mlp_ratio
+ if isinstance(config.assign_mlp_ratio, collections.abc.Iterable)
+ else (config.assign_mlp_ratio, config.assign_mlp_ratio)
+ )
+ tokens_dim, channels_dim = [int(x * config.hidden_size) for x in assign_mlp_ratio]
+ self.mlp_inter = TFGroupViTMixerMLP(config, num_group_token, tokens_dim, num_output_group, name="mlp_inter")
+ self.norm_post_tokens = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_post_tokens")
+ # norm on x
+ self.norm_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_x")
+ self.pre_assign_attn = TFGroupViTCrossAttentionLayer(config, name="pre_assign_attn")
+
+ self.assign = TFGroupViTAssignAttention(config, name="assign")
+ self.norm_new_x = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="norm_new_x")
+ self.mlp_channels = TFGroupViTMLP(
+ config, config.hidden_size, channels_dim, config.hidden_size, name="mlp_channels"
+ )
+ self.config = config
+
+ def project_group_token(self, group_tokens: tf.Tensor) -> tf.Tensor:
+ """
+ Args:
+ group_tokens (`tf.Tensor`): group tokens, of shape [batch_size, num_group_tokens, channels]
+
+ Returns:
+ projected_group_tokens (`tf.Tensor`): projected group tokens, of shape [batch_size, num_output_groups, channels]
+ """
+ # [B, num_output_groups, C] <- [B, num_group_tokens, C]
+ projected_group_tokens = self.mlp_inter(group_tokens)
+ projected_group_tokens = self.norm_post_tokens(projected_group_tokens)
+ return projected_group_tokens
+
+ def call(self, image_tokens: tf.Tensor, group_tokens: tf.Tensor, training: bool = False):
+ """
+ Args:
+ image_tokens (`tf.Tensor`): image tokens, of shape [batch_size, input_length, channels]
+ group_tokens (`tf.Tensor`): group tokens, [batch_size, num_group_tokens, channels]
+ """
+
+ group_tokens = self.norm_tokens(group_tokens)
+ image_tokens = self.norm_x(image_tokens)
+ # [batch_size, num_output_groups, channels]
+ projected_group_tokens = self.project_group_token(group_tokens)
+ projected_group_tokens = self.pre_assign_attn(projected_group_tokens, image_tokens)
+ new_image_tokens, attention = self.assign(projected_group_tokens, image_tokens)
+ new_image_tokens += projected_group_tokens
+
+ new_image_tokens = new_image_tokens + self.mlp_channels(self.norm_new_x(new_image_tokens))
+
+ return new_image_tokens, attention
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "norm_tokens", None) is not None:
+ with tf.name_scope(self.norm_tokens.name):
+ self.norm_tokens.build([None, None, self.config.hidden_size])
+ if getattr(self, "mlp_inter", None) is not None:
+ with tf.name_scope(self.mlp_inter.name):
+ self.mlp_inter.build(None)
+ if getattr(self, "norm_post_tokens", None) is not None:
+ with tf.name_scope(self.norm_post_tokens.name):
+ self.norm_post_tokens.build([None, None, self.config.hidden_size])
+ if getattr(self, "norm_x", None) is not None:
+ with tf.name_scope(self.norm_x.name):
+ self.norm_x.build([None, None, self.config.hidden_size])
+ if getattr(self, "pre_assign_attn", None) is not None:
+ with tf.name_scope(self.pre_assign_attn.name):
+ self.pre_assign_attn.build(None)
+ if getattr(self, "assign", None) is not None:
+ with tf.name_scope(self.assign.name):
+ self.assign.build(None)
+ if getattr(self, "norm_new_x", None) is not None:
+ with tf.name_scope(self.norm_new_x.name):
+ self.norm_new_x.build([None, None, self.config.hidden_size])
+ if getattr(self, "mlp_channels", None) is not None:
+ with tf.name_scope(self.mlp_channels.name):
+ self.mlp_channels.build(None)
+
+
+# Adapted from transformers.models.vit.modeling_tf_vit.TFViTPatchEmbeddings with ViT->GroupViT
+class TFGroupViTPatchEmbeddings(keras.layers.Layer):
+ """
+ This class turns `pixel_values` of shape `(batch_size, num_channels, height, width)` into the initial
+ `hidden_states` (patch embeddings) of shape `(batch_size, seq_length, hidden_size)` to be consumed by a
+ Transformer.
+ """
+
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+ image_size, patch_size = config.image_size, config.patch_size
+ num_channels = config.num_channels
+ # hidden_size is a member as it will be required in the call method
+ self.hidden_size = config.hidden_size
+
+ image_size = image_size if isinstance(image_size, collections.abc.Iterable) else (image_size, image_size)
+ patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
+ num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
+ self.image_size = image_size
+ self.patch_size = patch_size
+ self.num_patches = num_patches
+ self.num_channels = num_channels
+ self.config = config
+
+ self.projection = keras.layers.Conv2D(
+ filters=self.hidden_size,
+ kernel_size=patch_size,
+ strides=patch_size,
+ padding="valid",
+ data_format="channels_last",
+ use_bias=True,
+ kernel_initializer=get_initializer(self.config.initializer_range),
+ bias_initializer="zeros",
+ name="projection",
+ )
+
+ def call(
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
+ ) -> tf.Tensor:
+ batch_size, num_channels, height, width = shape_list(pixel_values)
+ if tf.executing_eagerly() and num_channels != self.num_channels:
+ raise ValueError(
+ "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
+ )
+ if (
+ not interpolate_pos_encoding
+ and tf.executing_eagerly()
+ and (height != self.image_size[0] or width != self.image_size[1])
+ ):
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model ({self.image_size[0]}*{self.image_size[1]})."
+ )
+
+ # When running on CPU, `keras.layers.Conv2D` doesn't support `NCHW` format.
+ # So change the input format from `NCHW` to `NHWC`.
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
+
+ projection = self.projection(pixel_values)
+
+ # Change the 2D spatial dimensions to a single temporal dimension.
+ # shape = (batch_size, num_patches, out_channels=embed_dim)
+ num_patches = (width // self.patch_size[1]) * (height // self.patch_size[0])
+ # In the TFGroupViTVisionEmbeddings the embeddings from this layer will be layer normalized
+ # LayerNormalization layer needs to have static last dimension (otherwise the test_keras_save_load fails with symbolic tensors)
+ # This is why we have used the hidden_size in the reshape method
+ embeddings = tf.reshape(tensor=projection, shape=(batch_size, num_patches, self.hidden_size))
+
+ return embeddings
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "projection", None) is not None:
+ with tf.name_scope(self.projection.name):
+ self.projection.build([None, None, None, self.num_channels])
+
+
+# Adapted from transformers.models.vit.modeling_tf_vit.TFViTEmbeddings
+class TFGroupViTVisionEmbeddings(keras.layers.Layer):
+ """
+ Construct the position and patch embeddings.
+
+ """
+
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.patch_embeddings = TFGroupViTPatchEmbeddings(config, name="patch_embeddings")
+ self.dropout = keras.layers.Dropout(rate=config.dropout, name="dropout")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ self.config = config
+
+ def build(self, input_shape=None):
+ num_patches = self.patch_embeddings.num_patches
+ self.position_embeddings = self.add_weight(
+ shape=(1, num_patches, self.config.hidden_size),
+ initializer="zeros",
+ trainable=True,
+ name="position_embeddings",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "patch_embeddings", None) is not None:
+ with tf.name_scope(self.patch_embeddings.name):
+ self.patch_embeddings.build(None)
+ if getattr(self, "dropout", None) is not None:
+ with tf.name_scope(self.dropout.name):
+ self.dropout.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.config.hidden_size])
+
+ def interpolate_pos_encoding(self, embeddings, height, width) -> tf.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ batch_size, num_patches, dim = shape_list(embeddings)
+ num_positions = shape_list(self.position_embeddings)[1]
+
+ if num_patches == num_positions and height == width:
+ return self.position_embeddings
+ patch_pos_embed = self.position_embeddings
+ h0 = height // self.config.patch_size
+ w0 = width // self.config.patch_size
+ patch_pos_embed = tf.image.resize(
+ images=tf.reshape(
+ patch_pos_embed, shape=(1, int(math.sqrt(num_positions)), int(math.sqrt(num_positions)), dim)
+ ),
+ size=(h0, w0),
+ method="bicubic",
+ )
+ patch_pos_embed = tf.reshape(tensor=patch_pos_embed, shape=(1, -1, dim))
+ return patch_pos_embed
+
+ def call(
+ self, pixel_values: tf.Tensor, interpolate_pos_encoding: bool = False, training: bool = False
+ ) -> tf.Tensor:
+ _, _, height, width = shape_list(pixel_values)
+ embeddings = self.patch_embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+ embeddings = self.layernorm(embeddings)
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embeddings
+
+ embeddings = self.dropout(embeddings)
+
+ return embeddings
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextEmbeddings with CLIP->GroupViT
+class TFGroupViTTextEmbeddings(keras.layers.Layer):
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+
+ self.config = config
+
+ def build(self, input_shape: tf.TensorShape = None):
+ with tf.name_scope("token_embedding"):
+ self.weight = self.add_weight(
+ shape=(self.config.vocab_size, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
+ trainable=True,
+ name="weight",
+ )
+
+ with tf.name_scope("position_embedding"):
+ self.position_embedding = self.add_weight(
+ shape=(self.config.max_position_embeddings, self.embed_dim),
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
+ trainable=True,
+ name="embeddings",
+ )
+
+ super().build(input_shape)
+
+ def call(
+ self,
+ input_ids: tf.Tensor = None,
+ position_ids: tf.Tensor = None,
+ inputs_embeds: tf.Tensor = None,
+ ) -> tf.Tensor:
+ """
+ Applies the token and position embeddings to the input tensors.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ if input_ids is None and inputs_embeds is None:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if inputs_embeds is None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
+
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
+ final_embeddings = inputs_embeds + position_embeds
+
+ return final_embeddings
+
+
+class TFGroupViTStage(keras.layers.Layer):
+ """This corresponds to the `GroupingLayer` class in the GroupViT implementation."""
+
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ depth: int,
+ num_prev_group_token: int,
+ num_group_token: int,
+ num_output_group: int,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.config = config
+ self.depth = depth
+ self.num_group_token = num_group_token
+ self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(depth)]
+
+ if num_group_token > 0:
+ self.downsample = TFGroupViTTokenAssign(
+ config=config,
+ num_group_token=num_group_token,
+ num_output_group=num_output_group,
+ name="downsample",
+ )
+ else:
+ self.downsample = None
+
+ if num_prev_group_token > 0 and num_group_token > 0:
+ self.group_projector = [
+ keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="group_projector.0"),
+ TFGroupViTMixerMLP(
+ config, num_prev_group_token, config.hidden_size // 2, num_group_token, name="group_projector.1"
+ ),
+ ]
+ else:
+ self.group_projector = None
+
+ def build(self, input_shape=None):
+ if self.num_group_token > 0:
+ self.group_token = self.add_weight(
+ shape=(1, self.num_group_token, self.config.hidden_size),
+ initializer="zeros",
+ trainable=True,
+ name="group_token",
+ )
+ else:
+ self.group_token = None
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "downsample", None) is not None:
+ with tf.name_scope(self.downsample.name):
+ self.downsample.build(None)
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+ if getattr(self, "group_projector", None) is not None:
+ with tf.name_scope(self.group_projector[0].name):
+ self.group_projector[0].build([None, None, self.config.hidden_size])
+ with tf.name_scope(self.group_projector[1].name):
+ self.group_projector[1].build(None)
+
+ @property
+ def with_group_token(self):
+ return self.group_token is not None
+
+ def split_x(self, x: tf.Tensor) -> tf.Tensor:
+ if self.with_group_token:
+ return x[:, : -self.num_group_token], x[:, -self.num_group_token :]
+ else:
+ return x, None
+
+ def concat_x(self, x: tf.Tensor, group_token: tf.Tensor | None = None) -> tf.Tensor:
+ if group_token is None:
+ return x
+ return tf.concat([x, group_token], axis=1)
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ prev_group_token: tf.Tensor | None = None,
+ output_attentions: bool = False,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ prev_group_token (`tf.Tensor`, *optional*): group tokens of the previous stage, of shape
+ `(batch, num_prev_group_token, embed_dim)`.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the grouping tensors of the grouping block.
+ """
+ if self.with_group_token:
+ group_token = tf.tile(self.group_token, multiples=(shape_list(hidden_states)[0], 1, 1))
+ if self.group_projector is not None:
+ for layer in self.group_projector:
+ prev_group_token = layer(prev_group_token)
+ group_token = group_token + prev_group_token
+ else:
+ group_token = None
+
+ x = hidden_states
+
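+ # the learnable group tokens are appended to the image tokens, processed jointly by the transformer
+ # layers, and split off again before the assignment/downsample step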
+ cat_x = self.concat_x(x, group_token)
+ for layer in self.layers:
+ layer_out = layer(
+ cat_x,
+ attention_mask=None,
+ causal_attention_mask=None,
+ output_attentions=None,
+ )
+ cat_x = layer_out[0]
+
+ x, group_token = self.split_x(cat_x)
+
+ attention = None
+ if self.downsample is not None:
+ x, attention = self.downsample(x, group_token)
+
+ outputs = (x, group_token)
+ if output_attentions:
+ outputs = outputs + (attention,)
+
+ return outputs
+
+
+class TFGroupViTMLP(keras.layers.Layer):
+ def __init__(
+ self,
+ config: GroupViTVisionConfig,
+ hidden_size: Optional[int] = None,
+ intermediate_size: Optional[int] = None,
+ output_size: Optional[int] = None,
+ **kwargs,
+ ):
+ super().__init__(**kwargs)
+ self.config = config
+ self.activation_fn = get_tf_activation(config.hidden_act)
+ hidden_size = hidden_size if hidden_size is not None else config.hidden_size
+ intermediate_size = intermediate_size if intermediate_size is not None else config.intermediate_size
+ output_size = output_size if output_size is not None else hidden_size
+ self.fc1 = keras.layers.Dense(intermediate_size, name="fc1")
+ self.fc2 = keras.layers.Dense(output_size, name="fc2")
+ self.intermediate_size = intermediate_size
+ self.hidden_size = hidden_size
+
+ def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "fc1", None) is not None:
+ with tf.name_scope(self.fc1.name):
+ self.fc1.build([None, None, self.hidden_size])
+ if getattr(self, "fc2", None) is not None:
+ with tf.name_scope(self.fc2.name):
+ self.fc2.build([None, None, self.intermediate_size])
+
+
+class TFGroupViTMixerMLP(TFGroupViTMLP):
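+ # the mixer MLP operates across the token dimension: transpose (batch, tokens, channels) -> (batch, channels, tokens),
+ # apply the MLP, then transpose back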
+ def call(self, x, training: bool = False):
+ x = super().call(hidden_states=tf.transpose(x, perm=(0, 2, 1)))
+ return tf.transpose(x, perm=(0, 2, 1))
+
+
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPAttention
+class TFGroupViTAttention(keras.layers.Layer):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = self.embed_dim // self.num_attention_heads
+ if self.attention_head_size * self.num_attention_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_attention_heads})."
+ )
+
+ factor = config.initializer_factor
+ in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
+ out_proj_std = (self.embed_dim**-0.5) * factor
+
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
+
+ self.q_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj"
+ )
+ self.k_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj"
+ )
+ self.v_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj"
+ )
+
+ self.dropout = keras.layers.Dropout(rate=config.attention_dropout)
+
+ self.out_proj = keras.layers.Dense(
+ units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj"
+ )
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor = None,
+ causal_attention_mask: tf.Tensor = None,
+ output_attentions: bool = None,
+ encoder_hidden_states: tf.Tensor = None,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """Input shape: Batch x Time x Channel"""
+
+ batch_size = shape_list(hidden_states)[0]
+ is_cross_attention = encoder_hidden_states is not None
+
+ mixed_query_layer = self.q_proj(inputs=hidden_states)
+ if is_cross_attention:
+ mixed_key_layer = self.k_proj(inputs=encoder_hidden_states)
+ mixed_value_layer = self.v_proj(inputs=encoder_hidden_states)
+ else:
+ mixed_key_layer = self.k_proj(inputs=hidden_states)
+ mixed_value_layer = self.v_proj(inputs=hidden_states)
+
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ # (batch size, num_heads, seq_len_q, seq_len_k)
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
+ attention_scores = tf.divide(attention_scores, dk)
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function)
+ attention_scores = tf.add(attention_scores, causal_attention_mask)
+
+ if attention_mask is not None:
+ # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function)
+ attention_scores = tf.add(attention_scores, attention_mask)
+
+ # Normalize the attention scores to probabilities.
+ _attention_probs = stable_softmax(logits=attention_scores, axis=-1)
+
+ # This is actually dropping out entire tokens to attend to, which might
+ # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(inputs=_attention_probs)
+
+ attention_output = tf.matmul(attention_probs, value_layer)
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
+
+ # (batch_size, seq_len_q, embed_dim)
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim))
+
+ attention_output = self.out_proj(attention_output)
+ # In TFBert, attention weights are returned after dropout.
+ # However, in CLIP, they are returned before dropout.
+ outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,)
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q_proj", None) is not None:
+ with tf.name_scope(self.q_proj.name):
+ self.q_proj.build([None, None, self.embed_dim])
+ if getattr(self, "k_proj", None) is not None:
+ with tf.name_scope(self.k_proj.name):
+ self.k_proj.build([None, None, self.embed_dim])
+ if getattr(self, "v_proj", None) is not None:
+ with tf.name_scope(self.v_proj.name):
+ self.v_proj.build([None, None, self.embed_dim])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.embed_dim])
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPEncoderLayer with CLIP->GroupViT
+class TFGroupViTEncoderLayer(keras.layers.Layer):
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embed_dim = config.hidden_size
+ self.self_attn = TFGroupViTAttention(config, name="self_attn")
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
+ self.mlp = TFGroupViTMLP(config, name="mlp")
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ attention_mask: tf.Tensor,
+ causal_attention_mask: tf.Tensor,
+ output_attentions: bool,
+ training: bool = False,
+ ) -> Tuple[tf.Tensor]:
+ """
+ Args:
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`tf.Tensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ causal_attention_mask (`tf.Tensor`): causal attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`):
+ Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned
+ tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(inputs=hidden_states)
+ attention_outputs = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ training=training,
+ )
+ hidden_states = attention_outputs[0]
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(inputs=hidden_states)
+ hidden_states = self.mlp(hidden_states=hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "self_attn", None) is not None:
+ with tf.name_scope(self.self_attn.name):
+ self.self_attn.build(None)
+ if getattr(self, "layer_norm1", None) is not None:
+ with tf.name_scope(self.layer_norm1.name):
+ self.layer_norm1.build([None, None, self.embed_dim])
+ if getattr(self, "mlp", None) is not None:
+ with tf.name_scope(self.mlp.name):
+ self.mlp.build(None)
+ if getattr(self, "layer_norm2", None) is not None:
+ with tf.name_scope(self.layer_norm2.name):
+ self.layer_norm2.build([None, None, self.embed_dim])
+
+
+# Adapted from transformers.models.clip.modeling_tf_clip.TFGroupViTTextEncoder
+class TFGroupViTTextEncoder(keras.layers.Layer):
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.layers = [TFGroupViTEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask: tf.Tensor,
+ causal_attention_mask: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[Tuple, TFBaseModelOutput]:
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "layers", None) is not None:
+ for layer in self.layers:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+class TFGroupViTVisionEncoder(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs) -> None:
+ super().__init__(**kwargs)
+
+ self.stages = [
+ TFGroupViTStage(
+ config=config,
+ depth=config.depths[i],
+ num_group_token=config.num_group_tokens[i],
+ num_output_group=config.num_output_groups[i],
+ num_prev_group_token=config.num_output_groups[i - 1] if i > 0 else 0,
+ name=f"stages_._{i}",
+ )
+ for i in range(len(config.depths))
+ ]
+
+ def call(
+ self,
+ hidden_states: tf.Tensor,
+ output_hidden_states: bool,
+ output_attentions: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[tuple, TFBaseModelOutput]:
+ all_hidden_states = () if output_hidden_states else None
+ all_groupings = () if output_attentions else None
+
+ group_tokens = None
+
+ for stage in self.stages:
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = stage(hidden_states, group_tokens, output_attentions)
+
+ hidden_states = layer_outputs[0]
+ group_tokens = layer_outputs[1]
+
+ if output_attentions and layer_outputs[2] is not None:
+ all_groupings = all_groupings + (layer_outputs[2],)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_groupings] if v is not None)
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_groupings
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "stages", None) is not None:
+ for layer in self.stages:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextTransformer with CLIPText->GroupViTText, CLIPEncoder->GroupViTTextEncoder
+class TFGroupViTTextTransformer(keras.layers.Layer):
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embeddings = TFGroupViTTextEmbeddings(config, name="embeddings")
+ self.encoder = TFGroupViTTextEncoder(config, name="encoder")
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
+
+ # For `pooled_output` computation
+ self.eos_token_id = config.eos_token_id
+ self.embed_dim = config.hidden_size
+
+ def call(
+ self,
+ input_ids: TFModelInputType,
+ attention_mask: tf.Tensor,
+ position_ids: tf.Tensor,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ input_shape = shape_list(input_ids)
+
+ embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids)
+
+ batch_size, seq_length = input_shape
+ # CLIP's text model uses causal mask, prepare it here.
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
+ causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype)
+
+ # check attention mask and invert
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
+ attention_mask = _expand_mask(attention_mask)
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ sequence_output = self.final_layer_norm(inputs=sequence_output)
+
+ if self.eos_token_id == 2:
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
+ # ------------------------------------------------------------
+ # text_embeds.shape = [batch_size, n_ctx, transformer.width]
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
+ pooled_output = tf.gather_nd(
+ params=sequence_output,
+ indices=tf.stack(
+ values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1
+ ),
+ )
+ else:
+ # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
+ pooled_output = tf.gather_nd(
+ params=sequence_output,
+ indices=tf.stack(
+ values=(
+ tf.range(input_shape[0], dtype=tf.int64),
+ tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1),
+ ),
+ axis=1,
+ ),
+ )
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32):
+ # It is possible with an unspecified sequence length for seq_length to be
+ # a runtime value, which is unsupported by tf.constant. Per the TensorFlow
+ # docs, tf.fill can handle runtime dynamic shapes:
+ # https://www.tensorflow.org/api_docs/python/tf/fill
+ diag = tf.cast(tf.fill((seq_length,), 0.0), dtype)
+
+ # set an additive 2D attention mask with all places being masked
+ to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype)
+
+ # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked)
+ # TIP: think the 2D matrix as the space of (query_seq, key_seq)
+ to_mask = tf.linalg.band_part(to_mask, 0, -1)
+ # to_mask = tf.linalg.band_part(to_mask, -1, 0)
+ to_mask = tf.linalg.set_diag(to_mask, diagonal=diag)
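+ # e.g. for seq_length = 3 the resulting additive mask is
+ # [[    0., -10000., -10000.],
+ #  [    0.,      0., -10000.],
+ #  [    0.,      0.,      0.]]
+ # i.e. each query position can only attend to itself and to earlier key positions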
+
+ return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length))
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "final_layer_norm", None) is not None:
+ with tf.name_scope(self.final_layer_norm.name):
+ self.final_layer_norm.build([None, None, self.embed_dim])
+
+
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPVisionTransformer
+class TFGroupViTVisionTransformer(keras.layers.Layer):
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.embeddings = TFGroupViTVisionEmbeddings(config, name="embeddings")
+ self.encoder = TFGroupViTVisionEncoder(config, name="encoder")
+ self.layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layernorm")
+ self.embed_dim = config.hidden_size
+
+ def call(
+ self,
+ pixel_values: TFModelInputType,
+ output_attentions: bool,
+ output_hidden_states: bool,
+ return_dict: bool,
+ training: bool = False,
+ ) -> Union[Tuple, TFBaseModelOutputWithPooling]:
+ embedding_output = self.embeddings(pixel_values)
+
+ encoder_outputs = self.encoder(
+ hidden_states=embedding_output,
+ output_hidden_states=output_hidden_states,
+ output_attentions=output_attentions,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+
+ # normalize the last hidden state
+ last_hidden_state = self.layernorm(last_hidden_state)
+ pooled_output = tf.math.reduce_mean(last_hidden_state, axis=1)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "layernorm", None) is not None:
+ with tf.name_scope(self.layernorm.name):
+ self.layernorm.build([None, None, self.embed_dim])
+
+
+@keras_serializable
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPTextMainLayer with CLIP->GroupViT
+class TFGroupViTTextMainLayer(keras.layers.Layer):
+ config_class = GroupViTTextConfig
+
+ def __init__(self, config: GroupViTTextConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.text_model = TFGroupViTTextTransformer(config, name="text_model")
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.text_model.embeddings
+
+ def set_input_embeddings(self, value: tf.Variable):
+ self.text_model.embeddings.weight = value
+ self.text_model.embeddings.vocab_size = shape_list(value)[0]
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if input_ids is None:
+ raise ValueError("You have to specify input_ids")
+
+ input_shape = shape_list(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ text_model_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return text_model_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "text_model", None) is not None:
+ with tf.name_scope(self.text_model.name):
+ self.text_model.build(None)
+
+
+@keras_serializable
+# Copied from transformers.models.clip.modeling_tf_clip.TFCLIPVisionMainLayer with CLIP->GroupViT
+class TFGroupViTVisionMainLayer(keras.layers.Layer):
+ config_class = GroupViTVisionConfig
+
+ def __init__(self, config: GroupViTVisionConfig, **kwargs):
+ super().__init__(**kwargs)
+ self.config = config
+ self.vision_model = TFGroupViTVisionTransformer(config, name="vision_model")
+
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.vision_model.embeddings
+
+ @unpack_inputs
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ vision_model_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return vision_model_outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+
+
+@keras_serializable
+# Adapted from transformers.models.clip.modeling_tf_clip.TFCLIPMainLayer
+class TFGroupViTMainLayer(keras.layers.Layer):
+ config_class = GroupViTConfig
+
+ def __init__(self, config: GroupViTConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ if not isinstance(config.text_config, GroupViTTextConfig):
+ raise ValueError(
+ "config.text_config is expected to be of type GroupViTTextConfig but is of type"
+ f" {type(config.text_config)}."
+ )
+
+ if not isinstance(config.vision_config, GroupViTVisionConfig):
+ raise ValueError(
+ "config.vision_config is expected to be of type GroupViTVisionConfig but is of type"
+ f" {type(config.vision_config)}."
+ )
+
+ self.config = config
+
+ text_config = config.text_config
+ vision_config = config.vision_config
+
+ self.projection_dim = config.projection_dim
+ self.projection_intermediate_dim = config.projection_intermediate_dim
+ self.text_embed_dim = text_config.hidden_size
+ self.vision_embed_dim = vision_config.hidden_size
+
+ self.text_model = TFGroupViTTextTransformer(text_config, name="text_model")
+ self.vision_model = TFGroupViTVisionTransformer(vision_config, name="vision_model")
+
+ self.visual_projection = [
+ keras.layers.Dense(self.projection_intermediate_dim, name="visual_projection.0"),
+ keras.layers.BatchNormalization(name="visual_projection.1", momentum=0.9, epsilon=1e-5),
+ keras.layers.ReLU(name="visual_projection.2"),
+ keras.layers.Dense(self.projection_dim, name="visual_projection.3"),
+ ]
+ self.text_projection = [
+ keras.layers.Dense(self.projection_intermediate_dim, name="text_projection.0"),
+ keras.layers.BatchNormalization(name="text_projection.1", momentum=0.9, epsilon=1e-5),
+ keras.layers.ReLU(name="text_projection.2"),
+ keras.layers.Dense(self.projection_dim, name="text_projection.3"),
+ ]
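+ # Both projection heads are Dense -> BatchNorm -> ReLU -> Dense stacks; they are stored as plain Python
+ # lists and applied layer by layer in `get_text_features`, `get_image_features` and `call`.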
+
+ def build(self, input_shape=None):
+ self.logit_scale = self.add_weight(
+ shape=(1,),
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
+ trainable=True,
+ name="logit_scale",
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "text_model", None) is not None:
+ with tf.name_scope(self.text_model.name):
+ self.text_model.build(None)
+ if getattr(self, "vision_model", None) is not None:
+ with tf.name_scope(self.vision_model.name):
+ self.vision_model.build(None)
+ if getattr(self, "visual_projection", None) is not None:
+ with tf.name_scope(self.visual_projection[0].name):
+ self.visual_projection[0].build([None, None, None, self.vision_embed_dim])
+ with tf.name_scope(self.visual_projection[1].name):
+ self.visual_projection[1].build((None, self.projection_intermediate_dim))
+ with tf.name_scope(self.visual_projection[3].name):
+ self.visual_projection[3].build([None, None, None, self.projection_intermediate_dim])
+ if getattr(self, "text_projection", None) is not None:
+ with tf.name_scope(self.text_projection[0].name):
+ self.text_projection[0].build([None, None, None, self.text_embed_dim])
+ with tf.name_scope(self.text_projection[1].name):
+ self.text_projection[1].build((None, self.projection_intermediate_dim))
+ with tf.name_scope(self.text_projection[3].name):
+ self.text_projection[3].build([None, None, None, self.projection_intermediate_dim])
+
+ @unpack_inputs
+ def get_text_features(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ if input_ids is None:
+ raise ValueError("You have to specify input_ids")
+
+ input_shape = shape_list(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ pooled_output = text_outputs[1]
+ for layer in self.text_projection:
+ pooled_output = layer(pooled_output)
+
+ text_features = pooled_output
+ return text_features
+
+ @unpack_inputs
+ def get_image_features(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ pooled_output = vision_outputs[1]
+ for layer in self.visual_projection:
+ pooled_output = layer(pooled_output)
+
+ image_features = pooled_output
+ return image_features
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ pixel_values: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_segmentation: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
+ if input_ids is None:
+ raise ValueError("You have to specify input_ids")
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ input_shape = shape_list(input_ids)
+
+ if attention_mask is None:
+ attention_mask = tf.fill(dims=input_shape, value=1)
+ if output_segmentation:
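+ # the segmentation logits below are computed from the grouping attentions, so make sure the
+ # vision model returns them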
+ output_attentions = True
+ vision_outputs = self.vision_model(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ text_outputs = self.text_model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ image_embeds = vision_outputs[1]
+ for layer in self.visual_projection:
+ image_embeds = layer(image_embeds)
+
+ text_embeds = text_outputs[1]
+ for layer in self.text_projection:
+ text_embeds = layer(text_embeds)
+
+ # normalized features
+ image_embeds = image_embeds / tf.norm(image_embeds, axis=-1, keepdims=True)
+ text_embeds = text_embeds / tf.norm(text_embeds, axis=-1, keepdims=True)
+
+ # cosine similarity as logits
+ logit_scale = tf.math.exp(self.logit_scale)
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
+ logits_per_image = tf.transpose(logits_per_text)
+
+ seg_logits = None
+ if output_segmentation:
+ # grouped features
+ # [batch_size_image, num_group, hidden_size]
+ image_group_embeds = vision_outputs[0]
+ # [batch_size_image*num_group, hidden_size]
+ image_group_embeds = tf.reshape(image_group_embeds, shape=(-1, shape_list(image_group_embeds)[-1]))
+ for layer in self.visual_projection:
+ image_group_embeds = layer(image_group_embeds)
+ if output_hidden_states:
+ attentions = vision_outputs[3]
+ else:
+ attentions = vision_outputs[2]
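+ # `vision_outputs` is (last_hidden_state, pooled_output, [hidden_states,] attentions), so the grouping
+ # attentions sit at index 3 when hidden states are returned and at index 2 otherwise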
+ # [batch_size_image, num_group, height, width]
+ grouping = get_grouping_from_attentions(attentions, pixel_values.shape[2:])
+
+ # normalized features
+ image_group_embeds = image_group_embeds / tf.norm(
+ tensor=image_group_embeds, ord="euclidean", axis=-1, keepdims=True
+ )
+ # [batch_size_image x num_group, batch_size_text]
+ logits_per_image_group = tf.matmul(image_group_embeds, text_embeds, transpose_b=True) * logit_scale
+ # [batch_size_image, batch_size_text, num_group]
+ logits_per_image_group = tf.reshape(
+ logits_per_image_group, shape=(image_embeds.shape[0], -1, text_embeds.shape[0])
+ )
+ logits_per_image_group = tf.transpose(logits_per_image_group, perm=(0, 2, 1))
+
+ # [batch_size_image, batch_size_text, height x width]
+ flatten_grouping = tf.reshape(grouping, shape=(shape_list(grouping)[0], shape_list(grouping)[1], -1))
+
+ # [batch_size_image, batch_size_text, height, width]
+ seg_logits = tf.matmul(logits_per_image_group, flatten_grouping) * logit_scale
+ seg_logits = tf.reshape(
+ seg_logits, shape=(seg_logits.shape[0], seg_logits.shape[1], grouping.shape[2], grouping.shape[3])
+ )
+
+ loss = None
+ if return_loss:
+ loss = groupvit_loss(logits_per_text)[None, ...]
+
+ if not return_dict:
+ if seg_logits is not None:
+ output = (
+ logits_per_image,
+ logits_per_text,
+ seg_logits,
+ text_embeds,
+ image_embeds,
+ text_outputs,
+ vision_outputs,
+ )
+ else:
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
+ return ((loss,) + output) if loss is not None else output
+
+ return TFGroupViTModelOutput(
+ loss=loss,
+ logits_per_image=logits_per_image,
+ logits_per_text=logits_per_text,
+ segmentation_logits=seg_logits,
+ text_embeds=text_embeds,
+ image_embeds=image_embeds,
+ text_model_output=text_outputs,
+ vision_model_output=vision_outputs,
+ )
+
+
+class TFGroupViTPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = GroupViTConfig
+ base_model_prefix = "groupvit"
+
+
+GROUPVIT_START_DOCSTRING = r"""
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TF 2.0 models accept two formats as inputs:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ This second option is useful when using the [`keras.Model.fit`] method, which currently requires having all the
+ tensors in the first argument of the model call function: `model(inputs)`.
+
+ If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
+ first positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+
+
+ Args:
+ config ([`GroupViTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+GROUPVIT_TEXT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+GROUPVIT_VISION_INPUTS_DOCSTRING = r"""
+ Args:
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+GROUPVIT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `(batch_size, num_channels, height, width)`):
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+ [`CLIPImageProcessor.__call__`] for details.
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ return_loss (`bool`, *optional*):
+ Whether or not to return the contrastive loss.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+class TFGroupViTTextModel(TFGroupViTPreTrainedModel):
+ config_class = GroupViTTextConfig
+ main_input_name = "input_ids"
+
+ def __init__(self, config: GroupViTTextConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.groupvit = TFGroupViTTextMainLayer(config, name="groupvit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTTextConfig)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, TFGroupViTTextModel
+
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = TFGroupViTTextModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
+ ```"""
+
+ outputs = self.groupvit(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "groupvit", None) is not None:
+ with tf.name_scope(self.groupvit.name):
+ self.groupvit.build(None)
+
+
+class TFGroupViTVisionModel(TFGroupViTPreTrainedModel):
+ config_class = GroupViTVisionConfig
+ main_input_name = "pixel_values"
+
+ def __init__(self, config: GroupViTVisionConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.groupvit = TFGroupViTVisionMainLayer(config, name="groupvit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=GroupViTVisionConfig)
+ def call(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFGroupViTVisionModel
+
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> model = TFGroupViTVisionModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="tf")
+
+ >>> outputs = model(**inputs)
+ >>> last_hidden_state = outputs.last_hidden_state
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
+ ```"""
+
+ outputs = self.groupvit(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "groupvit", None) is not None:
+ with tf.name_scope(self.groupvit.name):
+ self.groupvit.build(None)
+
+
+@add_start_docstrings(GROUPVIT_START_DOCSTRING)
+class TFGroupViTModel(TFGroupViTPreTrainedModel):
+ config_class = GroupViTConfig
+
+ def __init__(self, config: GroupViTConfig, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.groupvit = TFGroupViTMainLayer(config, name="groupvit")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def get_text_features(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ r"""
+ Returns:
+ text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
+ the projection layer to the pooled output of [`TFGroupViTTextModel`].
+
+ Examples:
+
+ ```python
+ >>> from transformers import CLIPTokenizer, TFGroupViTModel
+
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> tokenizer = CLIPTokenizer.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
+ >>> text_features = model.get_text_features(**inputs)
+ ```"""
+
+ text_features = self.groupvit.get_text_features(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return text_features
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_VISION_INPUTS_DOCSTRING)
+ def get_image_features(
+ self,
+ pixel_values: TFModelInputType | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> tf.Tensor:
+ r"""
+ Returns:
+ image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
+ the projection layer to the pooled output of [`TFGroupViTVisionModel`].
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFGroupViTModel
+
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(images=image, return_tensors="tf")
+
+ >>> image_features = model.get_image_features(**inputs)
+ ```"""
+
+ image_features = self.groupvit.get_image_features(
+ pixel_values=pixel_values,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return image_features
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(GROUPVIT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @replace_return_docstrings(output_type=TFGroupViTModelOutput, config_class=GroupViTConfig)
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ pixel_values: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ return_loss: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ output_segmentation: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFGroupViTModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ Returns:
+
+ Examples:
+
+ ```python
+ >>> from PIL import Image
+ >>> import requests
+ >>> from transformers import AutoProcessor, TFGroupViTModel
+ >>> import tensorflow as tf
+
+ >>> model = TFGroupViTModel.from_pretrained("nvidia/groupvit-gcc-yfcc")
+ >>> processor = AutoProcessor.from_pretrained("nvidia/groupvit-gcc-yfcc")
+
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
+ >>> image = Image.open(requests.get(url, stream=True).raw)
+
+ >>> inputs = processor(
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
+ ... )
+
+ >>> outputs = model(**inputs)
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
+ >>> probs = tf.math.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
+ ```"""
+
+ outputs = self.groupvit(
+ input_ids=input_ids,
+ pixel_values=pixel_values,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ return_loss=return_loss,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ output_segmentation=output_segmentation,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ return outputs
+
+ def serving_output(self, output: TFGroupViTModelOutput) -> TFGroupViTModelOutput:
+ # TODO: As is this currently fails with saved_model=True, because
+ # TensorFlow cannot trace through nested dataclasses. Reference:
+ # https://github.com/huggingface/transformers/pull/16886
+ return output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "groupvit", None) is not None:
+ with tf.name_scope(self.groupvit.name):
+ self.groupvit.build(None)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py b/venv/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..a01c2279c15586b86bc86e4a430da58c3e628c53
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/idefics/modeling_idefics.py
@@ -0,0 +1,1588 @@
+# coding=utf-8
+# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch Idefics model."""
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn.functional as F
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import CrossEntropyLoss
+
+from ... import PreTrainedModel
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask_for_sdpa
+from ...modeling_outputs import ModelOutput
+from ...modeling_utils import PretrainedConfig
+from ...pytorch_utils import ALL_LAYERNORM_LAYERS
+from ...utils import (
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_idefics import IdeficsConfig
+from .perceiver import IdeficsPerceiverResampler
+from .vision import IdeficsVisionTransformer
+
+
+logger = logging.get_logger(__name__)
+
+_CONFIG_FOR_DOC = "IdeficsConfig"
+
+
+from ..deprecated._archive_maps import IDEFICS_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class IdeficsBaseModelOutputWithPast(ModelOutput):
+ """
+ Base class for Idefics model's outputs that may also contain a past key/values (to speed up sequential decoding).
+
+ Args:
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+
+ If `past_key_values` is used, only the last hidden-state of the sequences of shape `(batch_size, 1,
+ hidden_size)` is output.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and optionally if
+ `config.is_encoder_decoder=True` 2 additional tensors of shape `(batch_size, num_heads,
+ encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and optionally if
+ `config.is_encoder_decoder=True` in the cross-attention blocks) that can be used (see `past_key_values`
+ input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
+ sequence_length, hidden_size)`.
+
+ Image hidden states of the model, produced by the vision encoder and optionally by the perceiver.
+ """
+
+ last_hidden_state: torch.FloatTensor = None
+ past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+@dataclass
+class IdeficsCausalLMOutputWithPast(ModelOutput):
+ """
+ Base class for Idefics causal language model (or autoregressive) outputs.
+
+ Args:
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
+ Language modeling loss (for next-token prediction).
+ logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
+ Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`)
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
+ `past_key_values` input) to speed up sequential decoding.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
+ Tuple of `torch.FloatTensor` (one for the output of the image embeddings) of shape `(batch_size, num_images,
+ sequence_length, hidden_size)`.
+
+ Image hidden states of the model, produced by the vision encoder and optionally by the perceiver.
+ """
+
+ loss: Optional[torch.FloatTensor] = None
+ logits: torch.FloatTensor = None
+ past_key_values: Optional[List[torch.FloatTensor]] = None
+ hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+ attentions: Optional[Tuple[torch.FloatTensor]] = None
+ image_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+
+
+def expand_inputs_for_generation(
+ input_ids,
+ expand_size=1,
+ is_encoder_decoder=False,
+ attention_mask=None,
+ encoder_outputs=None,
+ **model_kwargs,
+):
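+ # Repeat every example along the batch dimension `expand_size` times (rows 0,0,...,1,1,...), as done
+ # during generation for e.g. beam search or multiple return sequences, and apply the same expansion to
+ # the image-specific inputs handled below.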
+ expanded_return_idx = (
+ torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device)
+ )
+ input_ids = input_ids.index_select(0, expanded_return_idx)
+ model_kwargs["pixel_values"] = model_kwargs.get("pixel_values", None)
+ model_kwargs["image_encoder_embeddings"] = model_kwargs.get("image_encoder_embeddings", None)
+ model_kwargs["perceiver_embeddings"] = model_kwargs.get("perceiver_embeddings", None)
+ model_kwargs["image_attention_mask"] = model_kwargs.get("image_attention_mask", None)
+
+ if "token_type_ids" in model_kwargs:
+ token_type_ids = model_kwargs["token_type_ids"]
+ model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx)
+
+ if attention_mask is not None:
+ model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx)
+
+ if model_kwargs["image_attention_mask"] is not None:
+ model_kwargs["image_attention_mask"] = model_kwargs["image_attention_mask"].index_select(
+ 0, expanded_return_idx
+ )
+
+ if model_kwargs["pixel_values"] is not None:
+ model_kwargs["pixel_values"] = model_kwargs["pixel_values"].index_select(0, expanded_return_idx)
+
+ elif model_kwargs["image_encoder_embeddings"] is not None:
+ model_kwargs["image_encoder_embeddings"] = model_kwargs["image_encoder_embeddings"].index_select(
+ 0, expanded_return_idx
+ )
+
+ elif model_kwargs["perceiver_embeddings"] is not None:
+ model_kwargs["perceiver_embeddings"] = model_kwargs["perceiver_embeddings"].index_select(
+ 0, expanded_return_idx
+ )
+
+ return input_ids, model_kwargs
+
+
+def prepare_inputs_for_generation(input_ids, past_key_values=None, **kwargs):
+ token_type_ids = kwargs.get("token_type_ids", None)
+ # only last token for inputs_ids if past is defined in kwargs
+ if past_key_values:
+ input_ids = input_ids[:, -1].unsqueeze(-1)
+ if token_type_ids is not None:
+ token_type_ids = token_type_ids[:, -1].unsqueeze(-1)
+
+ attention_mask = kwargs.get("attention_mask", None)
+ position_ids = kwargs.get("position_ids", None)
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
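+ # padded positions get a dummy position id of 1; they are ignored thanks to the attention mask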
+ if past_key_values:
+ position_ids = position_ids[:, -1].unsqueeze(-1)
+
+ pixel_values = kwargs.get("pixel_values", None)
+ image_encoder_embeddings = kwargs.get("image_encoder_embeddings", None)
+ perceiver_embeddings = kwargs.get("perceiver_embeddings", None)
+ image_attention_mask = kwargs.get("image_attention_mask", None)
+ interpolate_pos_encoding = kwargs.get("interpolate_pos_encoding", False)
+
+ return {
+ "input_ids": input_ids,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ "position_ids": position_ids,
+ "attention_mask": attention_mask,
+ "token_type_ids": token_type_ids,
+ "pixel_values": pixel_values,
+ "image_encoder_embeddings": image_encoder_embeddings,
+ "perceiver_embeddings": perceiver_embeddings,
+ "image_attention_mask": image_attention_mask,
+ "interpolate_pos_encoding": interpolate_pos_encoding,
+ }
+
+
+def freeze_model(model, module_exceptions=[]):
+ mapping = {
+ "LayerNorm": nn.LayerNorm,
+ "Linear": nn.Linear,
+ "Embedding": nn.Embedding,
+ }
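+ # `module_exceptions` is given as class-name strings; map them to the actual module classes so the
+ # isinstance check below works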
+ module_exceptions_mapped = [mapping[m] for m in module_exceptions]
+ for module in model.modules():
+ if module_exceptions and any(isinstance(module, t) for t in module_exceptions_mapped):
+ module.requires_grad_(True) # Explicitly setting it to True to avoid any mistakes
+ else:
+ module.requires_grad_(False)
+ return model
+
+
+class IdeficsDecoupledEmbedding(nn.Embedding):
+ # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/sparse.html#Embedding
+ """
+ Implements a decoupling of parameters to allow freezing (or not) a subset of the embeddings. In practice, the
+ regular `weight` can be trained or frozen (i.e. `partially_freeze=True`), and if `num_additional_embeddings` > 0,
+ then it will create `num_additional_embeddings` additional parameters that are always trained. If
+ `num_additional_embeddings=0`, then the module defaults back to the regular behavior of `nn.Embedding`.
+ """
+
+ def __init__(
+ self,
+ num_embeddings,
+ num_additional_embeddings,
+ embedding_dim,
+ partially_freeze: Optional[bool] = False,
+ device=None,
+ dtype=None,
+ padding_idx=None,
+ **kwargs,
+ ) -> None:
+ """
+ Args:
+ num_embeddings (`int`):
+ Size of the dictionary of embeddings
+ num_additional_embeddings (`int`):
+ Number of additional embeddings. Only useful when `partially_freeze=True`.
+ embedding_dim (`int`):
+ The size of each embedding vector
+ partially_freeze (`bool`, *optional*, defaults to `False`):
+ If `True`, the regular `weight` will be frozen. `additional_weight` is never frozen.
+ padding_idx (`int`, *optional*):
+ The padding index (needs to be less than num_embeddings)
+
+ Note: there are a lot of other parameters to initialize a standard `nn.Embedding` such as `max_norm` or
+ `norm_type`. We are not supporting these.
+ """
+ if padding_idx is not None and padding_idx > num_embeddings:
+ raise ValueError(f"padding_idx must be within num_embeddings. Got {padding_idx} and {num_embeddings}")
+ super().__init__(
+ num_embeddings=num_embeddings,
+ embedding_dim=embedding_dim,
+ device=device,
+ dtype=dtype,
+ padding_idx=padding_idx,
+ **kwargs,
+ )
+ self.num_embeddings = num_embeddings
+ self.padding_idx = padding_idx
+ self.num_additional_embeddings = num_additional_embeddings
+ self.partially_freeze = partially_freeze
+
+ if partially_freeze:
+ self.weight.requires_grad_(False)
+
+ if self.num_additional_embeddings > 0:
+ self.additional_embedding = nn.Embedding(
+ num_embeddings=self.num_additional_embeddings,
+ embedding_dim=embedding_dim,
+ device=device,
+ dtype=dtype,
+ )
+
+ def forward(self, input_ids):
+ """
+ we have 2 embeddings, with different indices - one pretrained self.weight and another
+ self.additional_embedding.weight that is being trained.
+
+ in order to make a lookup of the input ids, we:
+ 1. find out the indices of the entries belonging to the 2nd embedding
+ 2. extract those values while subtracting the size of the first embedding (num_embeddings), since the 2nd
+ embedding starts from 0 and not num_embeddings
+ 3. perform the 2nd embedding lookup
+ 4. now we handle the 1st embedding, we overwrite indices belonging to the 2nd embedding with a padding index
+ 5. perform the 1st embedding lookup
+ 6. now we overwrite the values in the 1st embedding lookup with the values of the 2nd embedding lookup
+
+ note: for the 1st embedding lookup we could have looked up only the low indices and not done the padding, but
+ then we would have to create a new tensor and populate it with the 2 lookups, which are spread out across
+ various indices - i.e. not a simple concat. We haven't benchmarked whether that more complex approach is any
+ faster; given that sequence lengths are usually relatively short, it's probably not faster, or not by much -
+ but it might be a good idea to measure.
+
+ """
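+ # Illustrative example (assumed sizes): with num_embeddings=100 and num_additional_embeddings=2,
+ # ids 0..99 are looked up in the (possibly frozen) `self.weight`, while ids 100 and 101 are looked up
+ # in `self.additional_embedding` at indices 0 and 1.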
+ if self.num_additional_embeddings == 0:
+ return F.embedding(input_ids, self.weight)
+
+ # Clone so that we don't modify the original input_ids later on
+ input_ids = input_ids.clone()
+ additional_vocab_indices = torch.where(input_ids >= self.num_embeddings)
+ input_ids_additional_vocab = input_ids[additional_vocab_indices]
+ additional_embeddings = self.additional_embedding(input_ids_additional_vocab - self.num_embeddings)
+
+ # for successful lookup replace input_ids with 0, the results of these will be discarded anyway
+ input_ids[additional_vocab_indices] = 0
+ full_vector = F.embedding(input_ids, self.weight)
+
+ # overwrite the records with high indices
+ full_vector[additional_vocab_indices] = additional_embeddings
+
+ return full_vector
+
+ def extra_repr(self) -> str:
+ return "num_embeddings={}, num_additional_embeddings={}, embedding_dim={}, partially_freeze={}".format(
+ self.num_embeddings,
+ self.num_additional_embeddings,
+ self.embedding_dim,
+ self.partially_freeze,
+ )
+
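+# Illustrative usage sketch (not part of the library API; the sizes below are made up): a frozen
+# 32000-token embedding plus 2 extra always-trainable token embeddings. Ids below 32000 hit the frozen
+# `weight`, ids 32000 and 32001 hit `additional_embedding`.
+#
+#   embed = IdeficsDecoupledEmbedding(
+#       num_embeddings=32000, num_additional_embeddings=2, embedding_dim=4096, partially_freeze=True
+#   )
+#   input_ids = torch.tensor([[1, 31999, 32000, 32001]])
+#   hidden = embed(input_ids)  # shape (1, 4, 4096); the last two rows come from `additional_embedding`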
+
+class IdeficsDecoupledLinear(nn.Linear):
+ # Derived from https://pytorch.org/docs/stable/_modules/torch/nn/modules/linear.html#Linear
+ """
+    Implements a decoupling of parameters to allow freezing (or not) a subset of the parameters. In practice, the
+    regular `weight` can be trained or frozen (frozen when `partially_freeze=True`), and if
+    `out_additional_features` > 0, then it will create `out_additional_features * in_features` additional parameters
+    that are always trained. If `out_additional_features=0`, then the module defaults back to the regular behavior of
+    `nn.Linear`.
+ """
+
+ def __init__(
+ self,
+ in_features: int,
+ out_features: int,
+ out_additional_features: int = 0,
+ bias: bool = True,
+ partially_freeze: bool = True,
+ device=None,
+ dtype=None,
+ ) -> None:
+ """
+        Args:
+            out_additional_features (`int`, *optional*, defaults to 0):
+                Number of additional trainable dimensions. Only makes sense when `partially_freeze=True`.
+            partially_freeze (`bool`, *optional*, defaults to `True`):
+                If `True`, the regular `weight` and `bias` will be frozen and the extra parameters (if any) will be
+                trainable. If `False`, defaults to the regular behavior of `nn.Linear`.
+ """
+ super().__init__(in_features, out_features, bias, device, dtype)
+ self.out_additional_features = out_additional_features
+ self.partially_freeze = partially_freeze
+
+ self.in_features = in_features
+ self.out_features = out_features
+
+ if partially_freeze:
+ self.weight.requires_grad_(False)
+ if bias:
+ self.bias.requires_grad_(False)
+
+ if out_additional_features > 0:
+ self.additional_fc = nn.Linear(
+ in_features=in_features,
+ out_features=out_additional_features,
+ bias=bias,
+ device=device,
+ dtype=dtype,
+ )
+
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
+ output = F.linear(input, self.weight, self.bias)
+
+ if self.out_additional_features > 0:
+ additional_features = self.additional_fc(input)
+ output = torch.cat((output, additional_features), -1)
+
+ return output
+
+ def extra_repr(self) -> str:
+ """Overwriting `nn.Linear.extra_repr` to include new parameters."""
+ return "in_features={}, out_features={}, out_additional_features={}, bias={}, partially_freeze={}".format(
+ self.in_features,
+ self.out_features,
+ self.out_additional_features,
+ self.bias is not None,
+ self.partially_freeze,
+ )
+
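+# Illustrative usage sketch (not part of the library API; the sizes below are made up): a frozen LM head
+# over a 32000-token vocabulary with 2 additional always-trainable output features. The outputs of the
+# frozen projection and of `additional_fc` are concatenated on the last dimension, giving 32002 logits.
+#
+#   lm_head = IdeficsDecoupledLinear(
+#       in_features=4096, out_features=32000, out_additional_features=2, bias=False, partially_freeze=True
+#   )
+#   logits = lm_head(torch.randn(1, 7, 4096))  # shape (1, 7, 32002)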
+
+# this was adapted from LlamaRMSNorm
+class IdeficsRMSNorm(nn.Module):
+ def __init__(self, hidden_size, eps=1e-6):
+ """
+ IdeficsRMSNorm is equivalent to T5LayerNorm
+ """
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(hidden_size))
+ self.variance_epsilon = eps
+
+ def forward(self, hidden_states):
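+        # RMS normalization: divide by the root-mean-square over the hidden dimension (no mean subtraction,
+        # unlike LayerNorm); the statistic is computed in fp32 for numerical stability, then the result is
+        # cast back to half precision if needed and scaled by the learned per-channel weight.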
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+
+ # convert into half-precision if necessary
+ if self.weight.dtype in [torch.float16, torch.bfloat16]:
+ hidden_states = hidden_states.to(self.weight.dtype)
+
+ return self.weight * hidden_states
+
+
+ALL_LAYERNORM_LAYERS.append(IdeficsRMSNorm)
+
+
+# this was adapted from LlamaRotaryEmbedding
+class IdeficsEmbedding(torch.nn.Module):
+ def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None):
+ super().__init__()
+
+ self.dim = dim
+ self.max_position_embeddings = max_position_embeddings
+ self.base = base
+ inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim))
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
+
+ # Build here to make `torch.jit.trace` work.
+ self._set_cos_sin_cache(
+ seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype()
+ )
+
+ def _set_cos_sin_cache(self, seq_len, device, dtype):
+ self.max_seq_len_cached = seq_len
+ t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.int64).type_as(self.inv_freq)
+
+ freqs = torch.einsum("i,j->ij", t, self.inv_freq)
+ # Different from paper, but it uses a different permutation in order to obtain the same calculation
+ emb = torch.cat((freqs, freqs), dim=-1)
+ self.register_buffer("cos_cached", emb.cos().to(dtype), persistent=False)
+ self.register_buffer("sin_cached", emb.sin().to(dtype), persistent=False)
+
+ def forward(self, x, seq_len=None):
+ # x: [bs, num_attention_heads, seq_len, head_size]
+ if seq_len > self.max_seq_len_cached:
+ self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=x.dtype)
+
+ return (
+ self.cos_cached[:seq_len].to(dtype=x.dtype),
+ self.sin_cached[:seq_len].to(dtype=x.dtype),
+ )
+
+
+def rotate_half(x):
+ """Rotates half the hidden dims of the input."""
+ x1 = x[..., : x.shape[-1] // 2]
+ x2 = x[..., x.shape[-1] // 2 :]
+ return torch.cat((-x2, x1), dim=-1)
+
+
+# Copied from transformers.models.mistral.modeling_mistral.apply_rotary_pos_emb
+def apply_rotary_pos_emb(q, k, cos, sin, position_ids, unsqueeze_dim=1):
+ """Applies Rotary Position Embedding to the query and key tensors.
+
+ Args:
+ q (`torch.Tensor`): The query tensor.
+ k (`torch.Tensor`): The key tensor.
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
+ position_ids (`torch.Tensor`):
+ The position indices of the tokens corresponding to the query and key tensors. For example, this can be
+ used to pass offsetted position ids when working with a KV-cache.
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+ Returns:
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
+ """
+ cos = cos[position_ids].unsqueeze(unsqueeze_dim)
+ sin = sin[position_ids].unsqueeze(unsqueeze_dim)
+ q_embed = (q * cos) + (rotate_half(q) * sin)
+ k_embed = (k * cos) + (rotate_half(k) * sin)
+ return q_embed, k_embed
+
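+# Shape sketch for the rotary path above (illustrative; the sizes are made up):
+#
+#   rotary = IdeficsEmbedding(dim=128)                         # dim == head_dim
+#   q = k = torch.randn(1, 32, 10, 128)                        # [bsz, num_heads, seq_len, head_dim]
+#   cos, sin = rotary(q, seq_len=10)                           # each of shape [10, 128]
+#   position_ids = torch.arange(10).unsqueeze(0)               # [bsz, seq_len]
+#   q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin, position_ids)  # same shapes as q and k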
+
+# this was adapted from LlamaMLP
+class IdeficsMLP(nn.Module):
+ def __init__(
+ self,
+ hidden_size: int,
+ intermediate_size: int,
+ hidden_act: str,
+ ):
+ super().__init__()
+ self.gate_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.down_proj = nn.Linear(intermediate_size, hidden_size, bias=False)
+ self.up_proj = nn.Linear(hidden_size, intermediate_size, bias=False)
+ self.act_fn = ACT2FN[hidden_act]
+
+ def forward(self, x):
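+        # Gated MLP: act(gate_proj(x)) * up_proj(x), projected back to hidden_size by down_proj
+        # (this is SwiGLU when `hidden_act` is `silu`).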
+ return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+
+# this was adapted from LlamaAttention
+class IdeficsAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ hidden_size: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_cross_attention: bool = False,
+ config: PretrainedConfig = None,
+ qk_layer_norms: bool = False,
+ ):
+ super().__init__()
+ self.hidden_size = hidden_size
+ self.num_heads = num_heads
+ self.head_dim = hidden_size // num_heads
+ self.dropout = dropout
+ self.is_causal = True
+
+ if (self.head_dim * num_heads) != self.hidden_size:
+ raise ValueError(
+ f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+ f" and `num_heads`: {num_heads})."
+ )
+
+ self.is_cross_attention = is_cross_attention
+
+ if not hasattr(nn.functional, "scaled_dot_product_attention"):
+            raise ValueError("This model requires PyTorch 2.0 or higher (for `scaled_dot_product_attention`).")
+
+ if self.is_cross_attention:
+ kv_input_dim = (
+ self.hidden_size if not hasattr(config.vision_config, "embed_dim") else config.vision_config.embed_dim
+ )
+ self.q_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.k_proj = nn.Linear(kv_input_dim, num_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(
+ kv_input_dim,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ else:
+ self.q_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.k_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.v_proj = nn.Linear(
+ self.hidden_size,
+ num_heads * self.head_dim,
+ bias=False,
+ )
+ self.o_proj = nn.Linear(
+ num_heads * self.head_dim,
+ hidden_size,
+ bias=False,
+ )
+ self.rotary_emb = IdeficsEmbedding(self.head_dim)
+
+ self.qk_layer_norms = qk_layer_norms
+ if self.qk_layer_norms:
+ self.q_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
+ self.k_layer_norm = IdeficsRMSNorm(self.head_dim, eps=config.rms_norm_eps)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: bool = False,
+ use_cache: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ is_cross_attention = self.is_cross_attention or key_value_states is not None
+
+ bsz, q_len, _ = hidden_states.size()
+
+ query_states = self.q_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ if not is_cross_attention:
+ key_states = self.k_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = self.v_proj(hidden_states).view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+ else:
+ _, kv_len, _ = key_value_states.size() # Note that, in this case, `kv_len` == `kv_seq_len`
+ key_states = self.k_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
+ value_states = (
+ self.v_proj(key_value_states).view(bsz, kv_len, self.num_heads, self.head_dim).transpose(1, 2)
+ )
+
+ kv_seq_len = key_states.shape[-2]
+ if past_key_value is not None:
+ kv_seq_len += past_key_value[0].shape[-2]
+ if not is_cross_attention:
+ cos, sin = self.rotary_emb(value_states, seq_len=max(kv_seq_len, q_len))
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin, position_ids)
+ # [bsz, nh, t, hd]
+
+ if past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+
+ past_key_value = (key_states, value_states) if use_cache else None
+
+ if self.qk_layer_norms:
+ query_states = self.q_layer_norm(query_states)
+ key_states = self.k_layer_norm(key_states)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
+ )
+
+ # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+ # Reference: https://github.com/pytorch/pytorch/issues/112577.
+ if query_states.device.type == "cuda" and attention_mask is not None:
+ query_states = query_states.contiguous()
+ key_states = key_states.contiguous()
+ value_states = value_states.contiguous()
+
+ attn_output = nn.functional.scaled_dot_product_attention(
+ query_states,
+ key_states,
+ value_states,
+ attn_mask=attention_mask,
+ dropout_p=self.dropout,
+ # The q_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create a causal mask in case q_len == 1.
+ is_causal=self.is_causal and attention_mask is None and q_len > 1,
+ )
+
+ if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
+
+ attn_output = self.o_proj(attn_output)
+
+ attn_weights = None
+ if output_attentions:
+ logger.warning_once(
+ "attn_weights are not extracted in scaled_dot_product_attention. The model returns None instead"
+ )
+
+ return attn_output, attn_weights, past_key_value
+
+
+# this was adapted from LlamaDecoderLayer
+class IdeficsDecoderLayer(nn.Module):
+ def __init__(self, config: IdeficsConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.self_attn = IdeficsAttention(
+ hidden_size=self.hidden_size,
+ num_heads=config.num_attention_heads,
+ dropout=config.dropout,
+ config=config,
+ )
+ self.mlp = IdeficsMLP(
+ hidden_size=self.hidden_size,
+ intermediate_size=config.intermediate_size,
+ hidden_act=config.hidden_act,
+ )
+ self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.dropout = config.dropout
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+
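+        # Pre-norm residual structure: hidden_states + dropout(SelfAttn(RMSNorm(hidden_states))),
+        # followed by hidden_states + dropout(MLP(RMSNorm(hidden_states))).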
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+ # Self Attention
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class IdeficsGatedCrossAttentionLayer(nn.Module):
+ def __init__(self, config: IdeficsConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.cross_attn = IdeficsAttention(
+ hidden_size=self.hidden_size,
+ num_heads=config.num_attention_heads,
+ is_cross_attention=True,
+ dropout=config.dropout,
+ config=config,
+ qk_layer_norms=config.qk_layer_norms,
+ )
+ self.mlp = IdeficsMLP(
+ hidden_size=self.hidden_size,
+ intermediate_size=config.intermediate_size,
+ hidden_act=config.hidden_act,
+ )
+ self.input_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+ self.post_attention_layernorm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+        self.dropout = config.dropout
+
+ self.act_cross_attn = nn.Tanh()
+ self.act_dense = nn.Tanh()
+
+ if config.alpha_initializer == "zeros":
+ if config.alpha_type == "vector":
+ self.alpha_cross_attn = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
+ self.alpha_dense = nn.Parameter(torch.zeros(1, 1, self.hidden_size))
+ elif config.alpha_type == "float":
+ self.alpha_cross_attn = nn.Parameter(torch.zeros(1))
+ self.alpha_dense = nn.Parameter(torch.zeros(1))
+ else:
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
+
+ elif config.alpha_initializer == "ones":
+ if config.alpha_type == "vector":
+ self.alpha_cross_attn = nn.Parameter(torch.ones(1, 1, self.hidden_size))
+ self.alpha_dense = nn.Parameter(torch.ones(1, 1, self.hidden_size))
+ elif config.alpha_type == "float":
+ self.alpha_cross_attn = nn.Parameter(torch.ones(1))
+ self.alpha_dense = nn.Parameter(torch.ones(1))
+ else:
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
+
+ elif config.alpha_initializer in {"normal", "gaussian", "random"}:
+ if config.alpha_type == "vector":
+ self.alpha_cross_attn = nn.Parameter(
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
+ )
+ self.alpha_dense = nn.Parameter(
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1, 1, self.hidden_size))
+ )
+ elif config.alpha_type == "float":
+ self.alpha_cross_attn = nn.Parameter(
+ torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1))
+ )
+ self.alpha_dense = nn.Parameter(torch.normal(mean=0.0, std=config.alphas_initializer_range, size=(1)))
+ else:
+ raise ValueError(f"Unknown value for `alpha_type` ({config.alpha_type})")
+
+ else:
+ raise NotImplementedError(f"Alpha initialization scheme {config.alpha_initializer} not yet implemented!")
+
+ if not (hasattr(self, "alpha_cross_attn") and hasattr(self, "alpha_dense")):
+ raise ValueError("Alpha parameters not initialized correctly!")
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ image_hidden_states: Optional[torch.Tensor] = None,
+ image_attention_mask: Optional[torch.Tensor] = None,
+ cross_attention_gate: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = False,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ image_attention_mask (`torch.FloatTensor`, *optional*): image attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ cross_attention_gate (`torch.FloatTensor`, *optional*):
+                gate of size `(batch, seq_len)` used to zero-out cross-attention output for tokens attending to no images.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
+ """
+ if image_hidden_states is None:
+ raise ValueError(
+                "`image_hidden_states` is required for the Idefics cross-attention module: these are the visual"
+                " features the text tokens are conditioned on."
+ )
+
+ if cross_attention_gate is None:
+ raise ValueError(
+ "`cross_attention_gate` is required for Idefics cross attention module to zero-out the cross-attention hidden_states attending to no images."
+ )
+
+ if past_key_value is not None:
+ raise NotImplementedError("Past key value states are not implemented for Idefics cross attention module.")
+
+ residual = hidden_states
+
+ hidden_states = self.input_layernorm(hidden_states)
+
+        # Cross-Attention
+ hidden_states, self_attn_weights, present_key_value = self.cross_attn(
+ hidden_states=hidden_states,
+ key_value_states=image_hidden_states,
+ attention_mask=image_attention_mask,
+ output_attentions=output_attentions,
+ )
+        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ # Fill in zeros for cross_attention hidden_states of tokens attending to no images
+ hidden_states[cross_attention_gate == 0] = hidden_states[cross_attention_gate == 0].fill_(0)
+ hidden_states = residual + self.act_cross_attn(self.alpha_cross_attn) * hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.post_attention_layernorm(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + self.act_dense(self.alpha_dense) * hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+LLAMA_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`IdeficsConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class IdeficsPreTrainedModel(PreTrainedModel):
+ config_class = IdeficsConfig
+ base_model_prefix = "model"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["IdeficsDecoderLayer", "IdeficsGatedCrossAttentionLayer"]
+ _supports_sdpa = True
+
+ def _init_weights(self, module):
+ # important: this ported version of Idefics isn't meant for training from scratch - only
+ # inference and fine-tuning - so the proper init weights code has been removed - the m4 code
+ # base should be used for training from scratch and it contains the correct code.
+ std = self.config.initializer_range
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ # Adapted from transformers.modeling_utils.PreTrainedModel._check_and_enable_sdpa
+ @classmethod
+ def _check_and_enable_sdpa(cls, config, hard_check_only: bool = False) -> PretrainedConfig:
+        # We remove the checks on `is_torch_sdpa_available()` and `cls._supports_sdpa` as Idefics supports SDPA from torch==2.0.0 (no requirement on 2.1).
+ _is_bettertransformer = getattr(cls, "use_bettertransformer", False)
+ if _is_bettertransformer:
+ return config
+
+ if not hard_check_only:
+ config._attn_implementation = "sdpa"
+ return config
+
+
+LLAMA_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
+ it.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ If `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see
+ `past_key_values`).
+
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
+ information on the default strategy.
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
+ LLAMA_START_DOCSTRING,
+)
+class IdeficsModel(IdeficsPreTrainedModel):
+ """
+    Transformer decoder consisting of `config.num_hidden_layers` layers. Each layer is an [`IdeficsDecoderLayer`].
+
+ Args:
+ config: IdeficsConfig
+ """
+
+ def __init__(self, config: IdeficsConfig):
+ super().__init__(config)
+ self.config = config
+ self.padding_idx = config.pad_token_id
+ self.vocab_size = config.vocab_size
+
+ self.embed_tokens = IdeficsDecoupledEmbedding(
+ num_embeddings=config.vocab_size,
+ num_additional_embeddings=config.additional_vocab_size,
+ embedding_dim=config.hidden_size,
+ partially_freeze=config.freeze_text_layers,
+ padding_idx=self.padding_idx,
+ )
+
+ self.image_size = config.vision_config.image_size
+ self.vision_config = config.vision_config
+ self.vision_model = IdeficsVisionTransformer(config.vision_config)
+
+ # Perceiver Resampler
+ if config.use_resampler:
+ perceiver_config = config.perceiver_config
+ self.perceiver_resampler = IdeficsPerceiverResampler(
+ config,
+ config.vision_config.embed_dim,
+ perceiver_config.resampler_depth,
+ perceiver_config.resampler_n_heads,
+ perceiver_config.resampler_head_dim,
+ perceiver_config.resampler_n_latents,
+ )
+
+ self.layers = nn.ModuleList([IdeficsDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+
+ self.cross_layer_interval = config.cross_layer_interval
+ num_cross_layers = config.num_hidden_layers // self.cross_layer_interval
+ self.gated_cross_attn_layers = nn.ModuleList(
+ [IdeficsGatedCrossAttentionLayer(config) for _ in range(num_cross_layers)]
+ )
+ self.gradient_checkpointing = False
+
+ self.norm = IdeficsRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ self.freeze_relevant_params(config)
+
+ def freeze_relevant_params(self, config=None):
+ if config is None:
+ config = self.config
+
+ if config.freeze_text_layers:
+ self.freeze_text_layers(config.freeze_text_module_exceptions)
+
+ if config.freeze_vision_layers:
+ freeze_model(self.vision_model, module_exceptions=config.freeze_vision_module_exceptions)
+
+ def freeze_text_layers(self, module_exceptions=[]):
+ for module in [self.layers, self.norm]:
+ freeze_model(module, module_exceptions=module_exceptions)
+
+ def freeze_vision_layers(self, module_exceptions=[]):
+ freeze_model(self.vision_model, module_exceptions=module_exceptions)
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ image_encoder_embeddings: Optional[torch.FloatTensor] = None,
+ perceiver_embeddings: Optional[torch.FloatTensor] = None,
+ image_attention_mask: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, IdeficsBaseModelOutputWithPast]:
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+
+ if past_key_values is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+
+ if attention_mask is not None and position_ids is None:
+ # create position_ids on the fly for batch generation
+ position_ids = attention_mask.long().cumsum(-1) - 1
+ position_ids.masked_fill_(attention_mask == 0, 1)
+ elif position_ids is None:
+ position_ids = torch.arange(
+ past_key_values_length, seq_length + past_key_values_length, dtype=torch.long, device=device
+ )
+ position_ids = position_ids.unsqueeze(0)
+
+ if (pixel_values, image_encoder_embeddings, perceiver_embeddings).count(None) != 2:
+ raise ValueError(
+ "Exactly 1 of pixel_values, image_encoder_embeddings or perceiver_embeddings has to be not-None."
+ )
+
+ elif pixel_values is not None:
+ pixel_values = pixel_values.to(dtype=self.dtype, device=device) # fp16 compatibility
+ batch_size, num_images = pixel_values.shape[:2]
+ pixel_values = pixel_values.contiguous().view(batch_size * num_images, *pixel_values.shape[2:])
+
+ # Get sequence from the vision encoder
+ image_hidden_states = self.vision_model(
+ pixel_values=pixel_values, interpolate_pos_encoding=interpolate_pos_encoding
+ ).last_hidden_state
+
+ elif image_encoder_embeddings is not None:
+ batch_size, num_images, image_seq_len, image_hidden_size = image_encoder_embeddings.size()
+ image_hidden_states = image_encoder_embeddings.to(dtype=self.dtype, device=device)
+ image_hidden_states = image_hidden_states.view(batch_size * num_images, image_seq_len, image_hidden_size)
+
+ if self.config.use_resampler:
+ if perceiver_embeddings is None:
+ perceiver_embeddings = self.perceiver_resampler(image_hidden_states)
+ image_seq_len, image_hidden_size = perceiver_embeddings.size(1), perceiver_embeddings.size(2)
+ else:
+ batch_size, num_images, image_seq_len, image_hidden_size = perceiver_embeddings.size()
+ image_hidden_states = perceiver_embeddings
+ elif perceiver_embeddings is None:
+ image_seq_len, image_hidden_size = image_hidden_states.size(1), image_hidden_states.size(2)
+ else:
+ raise ValueError("If `perceiver_embeddings` are passed, use_resampler should be True")
+
+ image_hidden_states = image_hidden_states.view(batch_size, num_images * image_seq_len, image_hidden_size)
+ # # Hack to use the model in full language modeling mode
+ # image_attention_mask = torch.zeros(batch_size, seq_length, 1, dtype=torch.long, device=image_hidden_states.device)
+ # Make image_attention_mask compatible with hidden states
+ text_seq_len = image_attention_mask.size(1)
+ image_attention_mask = image_attention_mask.unsqueeze(-1)
+ image_attention_mask = image_attention_mask.repeat(1, 1, 1, image_seq_len)
+ image_attention_mask = image_attention_mask.view(batch_size, text_seq_len, num_images * image_seq_len)
+
+ if image_hidden_states is not None:
+ image_batch_size, image_sequence_length, _ = image_hidden_states.size()
+ image_hidden_shape = (image_batch_size, image_sequence_length)
+ if image_attention_mask is None:
+ image_attention_mask = torch.ones(image_hidden_shape, device=device)
+ image_attention_mask = self.invert_attention_mask(image_attention_mask)
+ else:
+ image_attention_mask = None
+
+ # cross_attention_gate:
+        # For any tokens attending to no images, the hidden_states coming out of the cross-attention should be zeroed-out.
+        # `image_attention_mask` has shape [bsz, 1, text_seq_len, num_images * image_seq_len] with elements equal to either 0.0 or a very negative number.
+ # If any of the elements are 0.0, then the token is attending to at least one image and the gate value is 1. Otherwise the gate value is 0.
+ # `cross_attention_gate` has shape [bsz, seq_len] with elements equal to either 0.0 or 1.0.
+ cross_attention_gate = ((((image_attention_mask == 0.0).any(dim=-1)).to(dtype=self.dtype)).squeeze(dim=1)).to(
+ device
+ )
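+        # Worked example (illustrative): with one image and image_seq_len=2, a text token whose inverted mask row
+        # is [0.0, 0.0] attends to that image, so its gate is 1.0; a token whose row only contains very large
+        # negative values attends to no image, so its gate is 0.0 and the gated cross-attention layers zero out its
+        # cross-attention output.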
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input_ids)
+ # embed positions
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (batch_size, seq_length_with_past), dtype=torch.bool, device=inputs_embeds.device
+ )
+ attention_mask = _prepare_4d_causal_attention_mask_for_sdpa(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+
+ hidden_states = inputs_embeds
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
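+            # `vblock` bundles the (optional) gated cross-attention block and the decoder layer into a single
+            # callable so that gradient checkpointing can recompute both together.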
+ def vblock(
+ main_block,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ image_hidden_states,
+ image_attention_mask,
+ cross_attention_gate,
+ output_attentions,
+ use_cache,
+ layer_idx,
+ cross_layer_interval,
+ gated_cross_attn_layers,
+ ):
+ # TODO(ls): Add cross attention values to respective lists
+ if layer_idx % cross_layer_interval == 0:
+ xblock = gated_cross_attn_layers[layer_idx // cross_layer_interval]
+ outputs = xblock(
+ hidden_states,
+ attention_mask=attention_mask,
+ image_hidden_states=image_hidden_states,
+ image_attention_mask=image_attention_mask,
+ cross_attention_gate=cross_attention_gate,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ past_key_value=None, # not implemented
+ )
+ hidden_states = outputs[0]
+
+ layer_outputs = main_block(
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ return layer_outputs
+
+ if self.gradient_checkpointing and self.training:
+ past_key_value = None
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ layer_outputs = self._gradient_checkpointing_func(
+ vblock,
+ decoder_layer,
+ hidden_states,
+ attention_mask,
+ position_ids,
+ past_key_value,
+ image_hidden_states,
+ image_attention_mask,
+ cross_attention_gate,
+ output_attentions,
+ use_cache,
+ idx,
+ self.cross_layer_interval,
+ self.gated_cross_attn_layers,
+ )
+ else:
+ layer_outputs = vblock(
+ decoder_layer,
+ hidden_states,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_value=past_key_value,
+ image_hidden_states=image_hidden_states,
+ image_attention_mask=image_attention_mask,
+ cross_attention_gate=cross_attention_gate,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ layer_idx=idx,
+ cross_layer_interval=self.cross_layer_interval,
+ gated_cross_attn_layers=self.gated_cross_attn_layers,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ hidden_states = self.norm(hidden_states)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ next_cache = next_decoder_cache if use_cache else None
+ image_hidden_states = image_hidden_states.view(batch_size, num_images, image_seq_len, image_hidden_size)
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, image_hidden_states]
+ if v is not None
+ )
+ return IdeficsBaseModelOutputWithPast(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ image_hidden_states=image_hidden_states,
+ )
+
+
+class IdeficsForVisionText2Text(IdeficsPreTrainedModel):
+ _keys_to_ignore_on_load_missing = [r"lm_head.weight"]
+ _tied_weights_keys = ["model.embed_tokens.weight", "lm_head.weight"]
+
+ def __init__(self, config, vision_model=None):
+ super().__init__(config)
+ self.model = IdeficsModel(config)
+
+ self.lm_head = IdeficsDecoupledLinear(
+ in_features=config.hidden_size,
+ out_features=config.vocab_size,
+ out_additional_features=config.additional_vocab_size,
+ bias=False,
+ partially_freeze=config.freeze_lm_head,
+ )
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.model.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.model.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head = new_embeddings
+
+ def set_decoder(self, decoder):
+ self.model = decoder
+
+ def get_decoder(self):
+ return self.model
+
+ def tie_weights(self):
+ """
+ Overwrite `transformers.modeling_utils.PreTrainedModel.tie_weights` to handle the case of
+ IdeficsDecoupledLinear and IdeficsDecoupledEmbedding.
+ """
+ output_embeddings = self.get_output_embeddings()
+ input_embeddings = self.get_input_embeddings()
+
+ if getattr(self.config, "tie_word_embeddings", True):
+ output_embeddings.weight = input_embeddings.weight
+ if input_embeddings.num_additional_embeddings > 0:
+ assert output_embeddings.out_additional_features == input_embeddings.num_additional_embeddings
+ output_embeddings.additional_fc.weight = input_embeddings.additional_embedding.weight
+
+ if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
+ output_embeddings.out_features = input_embeddings.num_embeddings
+ if hasattr(output_embeddings, "out_additional_features") and hasattr(
+ input_embeddings, "num_additional_embeddings"
+ ):
+ output_embeddings.out_additional_features = input_embeddings.num_additional_embeddings
+
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=IdeficsCausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_ids: torch.LongTensor = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ image_encoder_embeddings: Optional[torch.FloatTensor] = None,
+ perceiver_embeddings: Optional[torch.FloatTensor] = None,
+ image_attention_mask: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, IdeficsCausalLMOutputWithPast]:
+ r"""
+ Args:
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import AutoProcessor, IdeficsForVisionText2Text
+
+ >>> model = IdeficsForVisionText2Text.from_pretrained("HuggingFaceM4/idefics-9b")
+ >>> processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics-9b")
+
+ >>> dogs_image_url_1 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image1.jpeg"
+ >>> dogs_image_url_2 = "https://huggingface.co/datasets/hf-internal-testing/fixtures_nlvr2/raw/main/image2.jpeg"
+
+ >>> prompts = [
+ ... [
+ ... "User:",
+ ... dogs_image_url_1,
+ ... "Describe this image.\nAssistant: An image of two dogs.\n",
+ ... "User:",
+ ... dogs_image_url_2,
+ ... "Describe this image.\nAssistant:",
+ ... ]
+ ... ]
+ >>> inputs = processor(prompts, return_tensors="pt")
+ >>> generate_ids = model.generate(**inputs, max_new_tokens=6)
+ >>> processor.batch_decode(generate_ids, skip_special_tokens=True)
+ ```"""
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+        # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
+ outputs = self.model(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ past_key_values=past_key_values,
+ inputs_embeds=inputs_embeds,
+ pixel_values=pixel_values,
+ image_encoder_embeddings=image_encoder_embeddings,
+ perceiver_embeddings=perceiver_embeddings,
+ image_attention_mask=image_attention_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ interpolate_pos_encoding=interpolate_pos_encoding,
+ return_dict=return_dict,
+ )
+
+ hidden_states = outputs[0]
+ logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ labels = labels.to(logits.device)
+ # Shift so that tokens < n predict n
+ if attention_mask is not None:
+ shift_attention_mask = attention_mask[..., 1:].to(logits.device)
+ shift_logits = logits[..., :-1, :][shift_attention_mask != 0].contiguous()
+ shift_labels = labels[..., 1:][shift_attention_mask != 0].contiguous()
+ else:
+ shift_logits = logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return (loss,) + output if loss is not None else output
+
+ return IdeficsCausalLMOutputWithPast(
+ loss=loss,
+ logits=logits,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ image_hidden_states=outputs.image_hidden_states,
+ )
+
+ def prepare_inputs_for_generation(self, input_ids, past=None, **kwargs):
+ image_hidden_states = kwargs.pop("image_hidden_states", None)
+ if image_hidden_states is not None:
+ if self.config.use_resampler:
+ kwargs["perceiver_embeddings"] = image_hidden_states
+ else:
+ kwargs["image_encoder_embeddings"] = image_hidden_states
+ kwargs["pixel_values"] = None
+ inputs = prepare_inputs_for_generation(input_ids, past=past, **kwargs)
+ unwanted_kwargs = ["token_type_ids"]
+ for kwarg in unwanted_kwargs:
+ inputs.pop(kwarg, None)
+ return inputs
+
+ @staticmethod
+ def _expand_inputs_for_generation(
+ *args,
+ **model_kwargs,
+ ):
+ return expand_inputs_for_generation(*args, **model_kwargs)
+
+ def _update_model_kwargs_for_generation(
+ self,
+ outputs: ModelOutput,
+ model_kwargs: Dict[str, Any],
+ is_encoder_decoder: bool = False,
+ standardize_cache_format: bool = False,
+ ) -> Dict[str, Any]:
+ model_kwargs = super()._update_model_kwargs_for_generation(
+ outputs,
+ model_kwargs,
+ is_encoder_decoder,
+ standardize_cache_format,
+ )
+
+ if "image_attention_mask" in model_kwargs:
+ image_attention_mask = model_kwargs["image_attention_mask"]
+ last_mask = image_attention_mask[:, -1, :].unsqueeze(1)
+ model_kwargs["image_attention_mask"] = last_mask
+
+ # Get the precomputed image_hidden_states
+ model_kwargs["image_hidden_states"] = outputs.image_hidden_states
+ return model_kwargs
+
+ @staticmethod
+ def _reorder_cache(past, beam_idx):
+ reordered_past = ()
+ for layer_past in past:
+ reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
+ return reordered_past
diff --git a/venv/lib/python3.10/site-packages/transformers/models/idefics/perceiver.py b/venv/lib/python3.10/site-packages/transformers/models/idefics/perceiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..888c5b0bb9395548c90deac4a70350d1ad39e2d8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/idefics/perceiver.py
@@ -0,0 +1,188 @@
+# This code was adapted from https://github.com/lucidrains/flamingo-pytorch licensed under the MIT License.
+#
+# MIT License
+#
+# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+
+"""
+
+Generic interface to various configurations of the Perceiver Resampler, that simply takes in a series of (potentially
+time-indexed) contextual embeddings, and "resamples" (compresses) them down to a pre-specified number of latents! Note
+that the Perceiver in general resamples based solely on the *long-range* context; there's a nice opportunity here to
+prime the Perceiver Resampler with say a single layer's worth of language embeddings (the target domain), and use that
+to softly "retrieve & compress" what we need --> this would be a novel contribution we should explore.
+
+References:
+ - DeepMind's Flamingo: https://www.deepmind.com/blog/tackling-multiple-tasks-with-a-single-visual-language-model
+ - Code borrowed w/ love from: https://github.com/lucidrains/flamingo-pytorch
+
+"""
+from typing import Optional, Tuple
+
+import torch
+import torch.nn as nn
+
+from .configuration_idefics import IdeficsConfig
+
+
+class IdeficsPerceiverResampler(nn.Module):
+ def __init__(
+ self, config: IdeficsConfig, embed_dim: int, depth: int, n_heads: int, head_dim: int, n_latents: int
+ ) -> None:
+ """
+        Instantiates a Perceiver Resampler that operates over a sequence of embeddings (say from a ResNet, ViT or
+        MAE) of a given dimension, performs `depth` blocks of cross-attention against a fixed set of `n_latents`
+        inputs, then returns a Tensor of shape [bsz, n_latents, embed_dim]. `embed_dim` is both the dimensionality of
+        the embeddings being fed to the Perceiver Resampler and of the latent embeddings it returns (e.g., the ViT
+        embed_dim, ResNet pool dim, and so on).
+
+ Args:
+ config (`IdeficsConfig`): config object
+ embed_dim (`int`): The size of each embedding vector
+ depth (`int`): Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
+ n_heads (`int`): Number of heads in each Transformer block (for multi-headed self-attention).
+ head_dim (`int`): Dimensionality of each head projection in the Transformer block.
+ n_latents (`int`):
+ Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
+
+ """
+ super().__init__()
+ self.embed_dim, self.n_heads, self.head_dim, self.n_latents = embed_dim, n_heads, head_dim, n_latents
+ self.qk_layer_norms = config.perceiver_config.qk_layer_norms_perceiver
+
+ # Create Latents for Perceiver
+ self.latents = nn.Parameter(torch.randn(self.n_latents, self.embed_dim), requires_grad=True)
+
+ self.intermediate_dim = (
+ self.embed_dim * 4
+ if not hasattr(config.vision_config, "embed_dim")
+ else config.vision_config.embed_dim * 4
+ )
+ # Create Transformer Blocks
+ self.blocks = nn.ModuleList(
+ [
+ nn.ModuleList(
+ [
+ IdeficsPerceiverAttention(self.embed_dim, self.n_heads, self.head_dim, self.qk_layer_norms),
+ IdeficsMLP(self.intermediate_dim, config),
+ ]
+ )
+ for _ in range(depth)
+ ]
+ )
+ self.layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(self, context: torch.Tensor) -> torch.Tensor:
+ """Resample arbitrary length context & *compress* down to self.n_latents latent embeddings"""
+ # einsum.repeat(self.latents, "seq embed -> bsz seq embed", bsz=context.shape[0])
+ latents = self.latents.repeat(context.shape[0], 1, 1)
+
+ # Feed through Perceiver Attention blocks...
+ for attn, ff in self.blocks:
+ latents = attn(context, latents) + latents
+ latents = ff(latents) + latents
+
+ return self.layer_norm(latents)
+
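+# Shape sketch for the resampler above (illustrative; the sizes are made up): a batch of ViT patch
+# embeddings of shape [bsz, seq, embed_dim], e.g. [2, 257, 1280], is compressed to [2, n_latents, 1280]
+# (e.g. n_latents=64) regardless of the input sequence length, since the queries come only from the
+# learned latents while keys/values come from the context concatenated with the latents.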
+
+class IdeficsPerceiverAttention(nn.Module):
+ def __init__(self, embed_dim: int, n_heads: int, head_dim: int, qk_layer_norms: bool) -> None:
+ """Perceiver Cross-Attention Module --> let long-form inputs be `context`, resampled embeddings be `latents`"""
+ super().__init__()
+ self.embed_dim, self.n_heads, self.head_dim = embed_dim, n_heads, head_dim
+ self.qk_layer_norms = qk_layer_norms
+ # Normalization & Scaling
+ self.context_layer_norm = nn.LayerNorm(self.embed_dim)
+ self.latents_layer_norm = nn.LayerNorm(self.embed_dim)
+ if self.qk_layer_norms:
+ self.q_layer_norm = nn.LayerNorm(self.head_dim)
+ self.k_layer_norm = nn.LayerNorm(self.head_dim)
+
+ self.qk_scale = self.head_dim**-0.5
+
+ # Q, K, V Projection (no bias -- detail from Perceiver/Flamingo Papers).
+ self.q_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
+ self.k_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
+ self.v_proj = nn.Linear(self.embed_dim, self.n_heads * self.head_dim, bias=False)
+
+ self.output_proj = nn.Linear(self.n_heads * self.head_dim, embed_dim, bias=False)
+
+ def forward(self, context: torch.Tensor, latents: torch.Tensor) -> torch.Tensor:
+ """
+        Runs Perceiver cross-attention, with `latents` appended to `context` along the `seq` dimension for keys/values.
+
+ Args:
+ context (`torch.Tensor`):
+ Tensor of shape `[bsz, seq, embed_dim]` representing long-form context to resample.
+ latents (`torch.Tensor`):
+ Tensor of shape `[bsz, n_latents, embed_dim]` representing fixed length latents to compress to.
+
+ Returns:
+ `torch.Tensor`: Tensor of shape `[bsz, n_latents, embed_dim]` representing attention over latents w/ cross
+ from context.
+ """
+ context = self.context_layer_norm(context)
+ latents = self.latents_layer_norm(latents)
+ batch_size, seq_length, embed_dim = context.shape[:3]
+
+ # Query, Key, Value Projections --> Note that in Flamingo, latents are *concatenated* with context prior to attn!
+ # Note: This results in queries w/ `seq = n_latents`, and keys, values with `seq = len(context) + n_latents`
+ q = self.q_proj(latents)
+ k = self.k_proj(torch.cat([context, latents], dim=-2))
+ v = self.v_proj(torch.cat([context, latents], dim=-2))
+
+        # Multi-headed attention w/ stable softmax (subtract the per-row max -- `amax` -- before the softmax call)
+        # =>> per batch element and head, `attn` is a matrix of shape [n_latents, seq + n_latents]
+ # einsum.rearrange(x, "bsz seq (heads embed) -> bsz heads seq embed", heads=self.n_heads)
+ q, k, v = [x.reshape(batch_size, x.shape[1], self.n_heads, self.head_dim).transpose(1, 2) for x in (q, k, v)]
+
+ if self.qk_layer_norms:
+ q = self.q_layer_norm(q)
+ k = self.k_layer_norm(k)
+
+ scores = torch.einsum("... i d, ... j d -> ... i j", q * self.qk_scale, k)
+ stabilized_scores = scores - (scores.amax(dim=-1, keepdim=True).detach())
+ attn = stabilized_scores.softmax(dim=-1)
+
+ # Attend & project back to output...
+ resampled = torch.einsum("... i j, ... j d -> ... i d", attn, v)
+ # einsum.rearrange(resampled, "bsz heads seq embed -> bsz seq (heads embed)", heads=self.n_heads)
+ return self.output_proj(resampled.transpose(1, 2).flatten(-2))
+
+
+class IdeficsMLP(nn.Module):
+ def __init__(self, intermediate_size, config: IdeficsConfig):
+ """Simple MLP block with intermediate_size and embedding size"""
+ super().__init__()
+ self.embed_dim = config.vision_config.embed_dim
+ self.ln = nn.LayerNorm(self.embed_dim)
+ self.fc = nn.Linear(self.embed_dim, intermediate_size, bias=False)
+ self.act = nn.ReLU()
+ self.c_proj = nn.Linear(intermediate_size, self.embed_dim, bias=False)
+
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
+ hidden_states = self.ln(hidden_states)
+ hidden_states = self.fc(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.c_proj(hidden_states)
+
+ return hidden_states
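+
+ # Illustrative note (shapes inferred from the layers above): the block maps
+ # [bsz, seq, embed_dim] -> [bsz, seq, intermediate_size] -> [bsz, seq, embed_dim],
+ # i.e. LayerNorm -> Linear -> ReLU -> Linear, using bias-free linear projections.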
diff --git a/venv/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py b/venv/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py
new file mode 100644
index 0000000000000000000000000000000000000000..d7fd8c8de6555e3e820d807413e5efafd37f8f79
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/idefics/processing_idefics.py
@@ -0,0 +1,408 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+Processor class for IDEFICS.
+"""
+
+from typing import Callable, List, Optional, Union
+from urllib.parse import urlparse
+
+from ...feature_extraction_utils import BatchFeature
+from ...processing_utils import ProcessorMixin
+from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, TextInput, TruncationStrategy
+from ...utils import TensorType, is_torch_available
+
+
+if is_torch_available():
+ import torch
+
+
+IMAGE_TOKEN = ""
+
+
+# copied from m4.training.packing
+def incremental_to_binary_attention_mask(incremental_mask, num_classes=-1):
+ # This function converts: [-1, 0, 1] => [[0, 0], [1, 0], [0, 1]]
+
+ # If any of the image indices are greater than num_classes, set them to -1.
+ # Tokens seen after the maximum allowed number of images don't attend to anything.
+ if num_classes != -1:
+ incremental_mask[incremental_mask >= num_classes] = -1
+
+ negatives = incremental_mask == -1
+ incremental_mask[negatives] = 0
+ attn_mask = torch.nn.functional.one_hot(incremental_mask, num_classes=num_classes)
+ attn_mask[negatives, :] = 0
+ return attn_mask
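+
+ # Toy example (illustrative sketch of the conversion above):
+ #   >>> m = torch.tensor([[-1, 0, 1]])
+ #   >>> incremental_to_binary_attention_mask(m, num_classes=2)
+ #   tensor([[[0, 0], [1, 0], [0, 1]]])   # rows with index -1 attend to no image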
+
+
+# copied from m4.training.packing
+def image_attention_mask_for_packed_input_ids(input_ids, tokenizer):
+ image_attention_mask = torch.full_like(input_ids, fill_value=-1)
+ next_image_attention_mask = torch.full_like(input_ids, fill_value=-1)
+ image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
+ eod_token_id = tokenizer.eos_token_id
+ for batch_idx in range(input_ids.size(0)):
+ count = -1
+ seen_eod = False
+ for idx, token_id in enumerate(input_ids[batch_idx]):
+ if token_id == image_token_id:
+ count += 1
+ image_attention_mask[batch_idx][idx] = count
+ seen_eod = False
+ else:
+ image_attention_mask[batch_idx][idx] = count
+
+ if seen_eod:
+ image_attention_mask[batch_idx][idx] = -1
+
+ if token_id == eod_token_id:
+ seen_eod = True
+
+ for batch_idx in range(input_ids.size(0)):
+ count = -1
+ seen_eod = False
+ for idx in range(input_ids[batch_idx].size(0) - 1, -1, -1):
+ token_id = input_ids[batch_idx][idx]
+ if token_id == image_token_id:
+ count += 1
+ next_image_attention_mask[batch_idx][idx] = count
+ seen_eod = False
+ else:
+ next_image_attention_mask[batch_idx][idx] = count
+
+ if token_id == eod_token_id:
+ seen_eod = True
+
+ if seen_eod:
+ next_image_attention_mask[batch_idx][idx] = -1
+
+ non_negative_indices = next_image_attention_mask[batch_idx] != -1
+ next_image_attention_mask[batch_idx][non_negative_indices] -= count
+ next_image_attention_mask[batch_idx][non_negative_indices] *= -1
+
+ return image_attention_mask, next_image_attention_mask
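+
+ # Illustrative trace (toy row, assumed for clarity): for input
+ # [<image>, t1, t2, <image>, t3, <eos>, t4] the first returned mask is
+ # [0, 0, 0, 1, 1, 1, -1]: each position records the index of the most recently
+ # seen image, and positions before any image or after <eos> get -1.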
+
+
+def is_url(string):
+ """Checks if the passed string contains a valid url and nothing else. e.g. if space is included it's immediately
+ invalidated the url"""
+ if " " in string:
+ return False
+ result = urlparse(string)
+ return all([result.scheme, result.netloc])
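+
+ # Illustrative behaviour (example inputs are hypothetical):
+ #   is_url("https://example.com/cat.png")  -> True
+ #   is_url("not a url")                    -> False  (contains a space)
+ #   is_url("example.com/cat.png")          -> False  (no scheme)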
+
+
+class IdeficsProcessor(ProcessorMixin):
+ r"""
+ Constructs an IDEFICS processor which wraps a Llama tokenizer and an IDEFICS image processor into a single processor.
+
+ [`IdeficsProcessor`] offers all the functionalities of [`IdeficsImageProcessor`] and [`LlamaTokenizerFast`]. See
+ the docstring of [`~IdeficsProcessor.__call__`] and [`~IdeficsProcessor.decode`] for more information.
+
+ Args:
+ image_processor (`IdeficsImageProcessor`):
+ An instance of [`IdeficsImageProcessor`]. The image processor is a required input.
+ tokenizer (`LlamaTokenizerFast`):
+ An instance of [`LlamaTokenizerFast`]. The tokenizer is a required input.
+ image_size (`int`, *optional*, defaults to 224): Image size (assuming a square image)
+ """
+
+ attributes = ["image_processor", "tokenizer"]
+ image_processor_class = "IdeficsImageProcessor"
+ tokenizer_class = "LlamaTokenizerFast"
+
+ def __init__(self, image_processor, tokenizer=None, image_size=224, add_end_of_utterance_token=None, **kwargs):
+ if image_processor is None:
+ raise ValueError("You need to specify an `image_processor`.")
+ if tokenizer is None:
+ raise ValueError("You need to specify a `tokenizer`.")
+
+ super().__init__(image_processor, tokenizer)
+ self.current_processor = self.image_processor
+ self.image_token_id = tokenizer.convert_tokens_to_ids(IMAGE_TOKEN)
+
+ self.default_image_dims = (
+ self.image_processor.image_num_channels,
+ self.image_processor.image_size,
+ self.image_processor.image_size,
+ )
+
+ self.tokenizer_was_trained_with_end_of_utterance_token = (
+ True
+ if "" in self.tokenizer.special_tokens_map.get("additional_special_tokens", [])
+ else False
+ )
+
+ def __call__(
+ self,
+ prompts: Union[List[TextInput], List[List[TextInput]]],
+ padding: Union[bool, str, PaddingStrategy] = "longest",
+ truncation: Union[bool, str, TruncationStrategy] = None,
+ max_length: Optional[int] = None,
+ transform: Callable = None,
+ add_eos_token=False,
+ add_end_of_utterance_token=None,
+ debug=False,
+ return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
+ ) -> BatchEncoding:
+ """This method takes batched or non-batched prompts made of text and images and converts them into prompts that
+ the model was trained on and prepares the image pixel values for the model to process.
+
+ Args:
+ prompts (`Union[List[TextInput], List[List[TextInput]]]`):
+ either a single prompt or a batched list of prompts - see the detailed description immediately after
+ the end of the arguments doc section.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `"longest"`):
+ Select a strategy to pad the returned sequences (according to the model's padding side and padding
+ index) among:
+ - `True` or `'longest'` (default): Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding. This will raise an error if the input sequences are of different
+ lengths.
+ Note: Unlike most processors, which set padding=`False` by default, `IdeficsProcessor` sets `padding="longest"`
+ by default. See https://github.com/huggingface/transformers/pull/29449#pullrequestreview-1925576061 for why.
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`, *optional*):
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+ transform (`Callable`, *optional*):
+ A custom transform function that accepts a single image can be passed for training. For example,
+ `torchvision.Compose` can be used to compose multiple functions. If `None` a preset inference-specific
+ set of transforms will be applied to the images
+ add_eos_token (`bool`, *optional*, defaults to `False`):
+ Adds `eos_token` at the end of the final prompt if `True`.
+ add_end_of_utterance_token (`bool`, *optional*):
+ Whether to automatically add `<end_of_utterance>` after each prompt's text input (unless followed by an
+ image). If `None` the tokenizer will be checked instead and if this token is found in
+ `additional_special_tokens` then the value will be `True`.
+ debug (`bool`, *optional*, defaults to `False`):
+ If `True`, helps debug prompt generation by dumping useful information.
+ return_tensors (`str` or `TensorType`, *optional*, defaults to `TensorType.PYTORCH`):
+ The type of tensors to return. Can be one of:
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
+
+ Returns:
+ a dict with entries: `input_ids`, `attention_mask`, `pixel_values`, `image_attention_mask` which can be
+ directly passed to `model.generate`
+
+ Detailed explanation:
+
+ Each entry in `prompts` is either a text to be passed as is or an image that will be processed.
+
+ An image can be either an image object (`PIL.Image`) or a url from which the image can be retrieved.
+
+ When the processor encounters an image it'll inject a
+ `<fake_token_around_image><image><fake_token_around_image>` entry into the prompt.
+
+ Example:
+
+ ```python
+ checkpoint = "HuggingFaceM4/idefics-9b"
+ processor = AutoProcessor.from_pretrained(checkpoint)
+ url = "https://hips.hearstapps.com/hmg-prod/images/cute-photos-of-cats-in-grass-1593184777.jpg"
+ img = processor.image_processor.fetch_images([url])[0]
+
+ prompts = [
+ "User:",
+ img,
+ "Describe this image.\nAssistant: An image of two kittens in grass.\n",
+ "User:",
+ "https://hips.hearstapps.com/hmg-prod/images/dog-puns-1581708208.jpg",
+ "Describe this image.\nAssistant:",
+ ]
+
+ inputs = processor(prompts, return_tensors="pt")
+ generated_ids = model.generate(**inputs, max_length=100)
+ generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+ ```
+
+ In this example the `prompts` will be converted into:
+
+ ```
+ <s>User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
+ Assistant: An image of two kittens in grass.
+ User:<fake_token_around_image><image><fake_token_around_image>Describe this image.
+ Assistant:
+ ```
+
+ and the two images will be massaged using [`IdeficsImageProcessor.__call__`] method and placed inside the
+ `pixel_values` dict entry of the return value.
+
+ This example also illustrates that images can be passed as objects or as text URLs: the first image is passed
+ as an object and the second one as a URL.
+
+ To do training, do:
+
+ ```python
+ image_transform = transforms.Compose(
+ [
+ transforms.RandomResizedCrop(
+ (w, h), scale=(0.9, 1.0), interpolation=transforms.InterpolationMode.BICUBIC
+ ),
+ transforms.ToTensor(),
+ transforms.Normalize(mean=self.image_mean, std=self.image_std),
+ ]
+ )
+ inputs = processor(prompts, transform=image_transform, return_tensors="pt")
+ ```
+
+ To help debug prompt generation, enable `debug=True`, which will show you what's happening.
+
+ """
+
+ # if the value isn't overridden by the user, check if the tokenizer was trained with this token and then use it
+ if add_end_of_utterance_token is None:
+ add_end_of_utterance_token = self.tokenizer_was_trained_with_end_of_utterance_token
+
+ # turn non-batched prompts into batched
+ if not any(isinstance(i, list) for i in prompts):
+ prompts = [prompts]
+
+ fake_token = ""
+ image_token = ""
+ end_of_utterance_token = ""
+
+ def image_tokens(last_was_image):
+ if last_was_image:
+ return image_token + fake_token
+ else:
+ return fake_token + image_token + fake_token
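+
+ # Illustrative note (follows directly from the helper above): the first image in a
+ # run contributes "<fake_token_around_image><image><fake_token_around_image>", while
+ # an image that directly follows another contributes "<image><fake_token_around_image>",
+ # so consecutive images share the fake token between them.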
+
+ all_prompts = []
+ all_images = []
+ for sample in prompts:
+ # the model was trained on samples starting with <s>
+ full_text = f"{self.tokenizer.bos_token}"
+
+ # an item can either be an image object or a url pointing to an image; everything else is treated as verbatim prompt text
+ image_objects = []
+ last_was_image = False
+ last_was_text = False
+ for i, item in enumerate(sample):
+ if i > 0:
+ last_was_text = not last_was_image
+
+ if isinstance(item, str):
+ item = item.strip(" ")
+ if is_url(item):
+ image = self.image_processor.fetch_images(item)
+ full_text += image_tokens(last_was_image)
+ image_objects.append(image)
+ last_was_image = True
+ else:
+ # we add end_of_utterance_token between subsequent text prompts (but not after the last one!)
+ if add_end_of_utterance_token and last_was_text:
+ full_text += end_of_utterance_token
+ full_text += item
+ last_was_image = False
+ else:
+ # must be an image obj
+ full_text += image_tokens(last_was_image)
+ image_objects.append(item)
+ last_was_image = True
+
+ if add_eos_token:
+ full_text += self.tokenizer.eos_token
+
+ if debug is True:
+ print(f"{full_text=}")
+
+ image_objects = self.image_processor(image_objects, transform=transform)
+
+ all_prompts.append(full_text)
+ all_images.append(image_objects)
+
+ text_encoding = self.tokenizer(
+ text=all_prompts,
+ add_special_tokens=False,
+ padding=padding,
+ truncation=truncation,
+ max_length=max_length,
+ )
+ all_texts = text_encoding["input_ids"]
+ all_attention_masks = text_encoding["attention_mask"]
+
+ # max_num_images has to be at least 1 even when there are no images
+ max_num_images = max(len(x) for x in all_images)
+ max_num_images = max(1, max_num_images)
+
+ at_least_one_image = sum(len(x) for x in all_images) > 0
+ output_input_ids = []
+ output_images = []
+ output_attention_masks = []
+ for text, attention_mask, images in zip(all_texts, all_attention_masks, all_images):
+ padded_input_ids = text
+
+ image_count = padded_input_ids.count(self.image_token_id)
+ local_max_num_images = min(image_count, max_num_images)
+
+ current_images = images[:local_max_num_images]
+
+ if len(current_images) > 0:
+ padded_image_tensor = torch.zeros(max_num_images, *current_images.size()[1:])
+ padded_image_tensor[: current_images.size(0)] = current_images
+ else:
+ padded_image_tensor = torch.zeros(max_num_images, *self.default_image_dims)
+
+ output_images.append(padded_image_tensor)
+ output_input_ids.append(torch.tensor(padded_input_ids))
+ output_attention_masks.append(torch.tensor(attention_mask))
+
+ output_input_ids = torch.stack(output_input_ids)
+ output_images = torch.stack(output_images)
+ output_attention_masks = torch.stack(output_attention_masks)
+
+ if at_least_one_image:
+ image_attention_mask, _ = image_attention_mask_for_packed_input_ids(output_input_ids, self.tokenizer)
+ image_attention_mask = incremental_to_binary_attention_mask(
+ image_attention_mask, num_classes=max_num_images
+ )
+ else:
+ # in full language mode we set the image mask to all-0s
+ image_attention_mask = torch.zeros(
+ output_input_ids.shape[0], output_input_ids.shape[1], 1, dtype=torch.bool
+ )
+
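+ # Resulting shapes (illustrative summary, inferred from the construction above):
+ #   input_ids / attention_mask: [batch, seq_len]
+ #   pixel_values:               [batch, max_num_images, channels, height, width]
+ #   image_attention_mask:       [batch, seq_len, max_num_images]
+ #                               (all-zero [batch, seq_len, 1] when there are no images)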
+ return BatchFeature(
+ data={
+ "input_ids": output_input_ids,
+ "attention_mask": output_attention_masks,
+ "pixel_values": output_images,
+ "image_attention_mask": image_attention_mask,
+ }
+ )
+
+ def batch_decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
+ refer to the docstring of this method for more information.
+ """
+ return self.tokenizer.batch_decode(*args, **kwargs)
+
+ def decode(self, *args, **kwargs):
+ """
+ This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
+ the docstring of this method for more information.
+ """
+ return self.tokenizer.decode(*args, **kwargs)
+
+ @property
+ def model_input_names(self):
+ tokenizer_input_names = self.tokenizer.model_input_names
+ image_processor_input_names = self.image_processor.model_input_names
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
diff --git a/venv/lib/python3.10/site-packages/transformers/models/idefics/vision.py b/venv/lib/python3.10/site-packages/transformers/models/idefics/vision.py
new file mode 100644
index 0000000000000000000000000000000000000000..d90f837b3c77baed36b1e23175939b264c155d0f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/idefics/vision.py
@@ -0,0 +1,490 @@
+# coding=utf-8
+# Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch IdeficsVision model: a copy of CLIPVisionModel using a simpler config object"""
+
+
+import math
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...activations import ACT2FN
+from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling
+from ...utils import ModelOutput, logging
+from .configuration_idefics import IdeficsVisionConfig
+
+
+logger = logging.get_logger(__name__)
+
+
+@dataclass
+class IdeficsVisionModelOutput(ModelOutput):
+ """
+ Base class for vision model outputs that also contain image embeddings obtained by pooling the last hidden states.
+
+ Args:
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
+ The image embeddings obtained by applying the projection layer to the pooler_output.
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Sequence of hidden-states at the output of the last layer of the model.
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
+
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
+ sequence_length)`.
+
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
+ heads.
+ """
+
+ image_embeds: Optional[torch.FloatTensor] = None
+ last_hidden_state: torch.FloatTensor = None
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
+
+
+# Adapted from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings
+class IdeficsVisionEmbeddings(nn.Module):
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.image_size = config.image_size
+ self.patch_size = config.patch_size
+
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
+
+ self.patch_embedding = nn.Conv2d(
+ in_channels=config.num_channels,
+ out_channels=self.embed_dim,
+ kernel_size=self.patch_size,
+ stride=self.patch_size,
+ bias=False,
+ )
+
+ self.num_patches = (self.image_size // self.patch_size) ** 2
+ self.num_positions = self.num_patches + 1
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
+
+ # Heavily inspired by https://github.com/huggingface/transformers/blob/v4.33.0/src/transformers/models/vit/modeling_vit.py#L82
+ def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
+ """
+ This method interpolates the pre-trained position encodings so that the model can be used on
+ higher-resolution images.
+
+ Source:
+ https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174
+ """
+
+ num_patches = embeddings.shape[1] - 1
+ pos_embed = self.position_embedding(self.position_ids)
+ num_positions = pos_embed.shape[1] - 1
+ if num_patches == num_positions and height == width:
+ return pos_embed
+ class_pos_embed = pos_embed[:, 0]
+ patch_pos_embed = pos_embed[:, 1:]
+
+ embed_dim = embeddings.shape[-1]
+ num_h_patches = height // self.config.patch_size
+ num_w_patches = width // self.config.patch_size
+ # we add a small number to avoid floating point error in the interpolation
+ # see discussion at https://github.com/facebookresearch/dino/issues/8
+ num_h_patches, num_w_patches = num_h_patches + 0.1, num_w_patches + 0.1
+ sqrt_num_positions = math.sqrt(num_positions)
+ patch_pos_embed = patch_pos_embed.reshape(1, int(sqrt_num_positions), int(sqrt_num_positions), embed_dim)
+ patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
+ fp32_upcasting = patch_pos_embed.dtype == torch.bfloat16
+ if fp32_upcasting:
+ logger.warning_once(
+ "Upcasting patch_pos_embed to fp32 for interpolation since `upsample_bicubic2d_out_frame` in nn.functional.interpolate "
+ "is not implemented for 'torch.bfloat16' dtype. This will result in a slight overhead."
+ )
+ patch_pos_embed = patch_pos_embed.to(torch.float)
+ patch_pos_embed = nn.functional.interpolate(
+ patch_pos_embed,
+ scale_factor=(num_h_patches / sqrt_num_positions, num_w_patches / sqrt_num_positions),
+ mode="bicubic",
+ align_corners=False,
+ )
+ if fp32_upcasting:
+ patch_pos_embed = patch_pos_embed.to(torch.bfloat16)
+ if int(num_h_patches) != patch_pos_embed.shape[-2] or int(num_w_patches) != patch_pos_embed.shape[-1]:
+ raise ValueError(
+ f"Number of patches for images ({int(num_h_patches), int(num_w_patches)}) don't match the "
+ f"shape of position embedding ({patch_pos_embed.shape[-2], patch_pos_embed.shape[-1]})"
+ )
+ patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, embed_dim)
+ return torch.cat((class_pos_embed.unsqueeze(0), patch_pos_embed), dim=1)
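+
+ # Numeric sketch (illustrative, assuming a checkpoint trained at image_size=224
+ # with patch_size=14): the checkpoint holds a 16x16 grid of patch position
+ # embeddings; for a 448x448 input this grid is resized bicubically to 32x32,
+ # while the class-token position embedding is passed through unchanged.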
+
+ def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding: bool = False) -> torch.Tensor:
+ batch_size, num_channels, height, width = pixel_values.shape
+ if not interpolate_pos_encoding:
+ if height != self.image_size or width != self.image_size:
+ raise ValueError(
+ f"Input image size ({height}*{width}) doesn't match model"
+ f" ({self.image_size}*{self.image_size}). You should try to set `interpolate_pos_encoding=True`"
+ )
+
+ target_dtype = self.patch_embedding.weight.dtype
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
+
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
+
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
+
+ # add positional encoding to each token
+ if interpolate_pos_encoding:
+ embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
+ else:
+ embeddings = embeddings + self.position_embedding(self.position_ids)
+
+ return embeddings
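+
+ # Illustrative note (shapes follow from the concatenation above): the returned
+ # embeddings have shape [batch, 1 + num_patches, embed_dim] -- a class token
+ # followed by one embedding per image patch, each with its position encoding added.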
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->IdeficsVision
+class IdeficsVisionAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.embed_dim = config.hidden_size
+ self.num_heads = config.num_attention_heads
+ self.head_dim = self.embed_dim // self.num_heads
+ if self.head_dim * self.num_heads != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
+ f" {self.num_heads})."
+ )
+ self.scale = self.head_dim**-0.5
+ self.dropout = config.attention_dropout
+
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ bsz, tgt_len, embed_dim = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scale
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.view(*proj_shape)
+ value_states = value_states.view(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ # apply the causal_attention_mask first
+ if causal_attention_mask is not None:
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
+ f" {causal_attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights has to be reshaped
+ # twice and reused in the following computation
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->IdeficsVision
+class IdeficsVisionMLP(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = self.fc2(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoderLayer with CLIP->IdeficsVision
+class IdeficsVisionEncoderLayer(nn.Module):
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+ self.self_attn = IdeficsVisionAttention(config)
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+ self.mlp = IdeficsVisionMLP(config)
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: torch.Tensor,
+ causal_attention_mask: torch.Tensor,
+ output_attentions: Optional[bool] = False,
+ ) -> Tuple[torch.FloatTensor]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ """
+ residual = hidden_states
+
+ hidden_states = self.layer_norm1(hidden_states)
+ hidden_states, attn_weights = self.self_attn(
+ hidden_states=hidden_states,
+ attention_mask=attention_mask,
+ causal_attention_mask=causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = residual + hidden_states
+
+ residual = hidden_states
+ hidden_states = self.layer_norm2(hidden_states)
+ hidden_states = self.mlp(hidden_states)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs
+
+
+# Copied from transformers.models.clip.modeling_clip.CLIPEncoder with CLIP->IdeficsVision
+class IdeficsVisionEncoder(nn.Module):
+ """
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
+ [`IdeficsVisionEncoderLayer`].
+
+ Args:
+ config: IdeficsVisionConfig
+ """
+
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.config = config
+ self.layers = nn.ModuleList([IdeficsVisionEncoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.gradient_checkpointing = False
+
+ def forward(
+ self,
+ inputs_embeds,
+ attention_mask: Optional[torch.Tensor] = None,
+ causal_attention_mask: Optional[torch.Tensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutput]:
+ r"""
+ Args:
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
+ for more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ hidden_states = inputs_embeds
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ encoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions,
+ )
+ else:
+ layer_outputs = encoder_layer(
+ hidden_states,
+ attention_mask,
+ causal_attention_mask,
+ output_attentions=output_attentions,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ if output_hidden_states:
+ encoder_states = encoder_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
+ )
+
+
+# Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer
+class IdeficsVisionTransformer(nn.Module):
+ def __init__(self, config: IdeficsVisionConfig):
+ super().__init__()
+ self.config = config
+ embed_dim = config.hidden_size
+
+ self.embeddings = IdeficsVisionEmbeddings(config)
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+ self.encoder = IdeficsVisionEncoder(config)
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
+
+ # Adapted from transformers.models.clip.modeling_clip.CLIPVisionTransformer.forward
+ def forward(
+ self,
+ pixel_values: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ interpolate_pos_encoding: Optional[bool] = False,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
+ r"""
+ Returns:
+
+ """
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if pixel_values is None:
+ raise ValueError("You have to specify pixel_values")
+
+ hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
+ hidden_states = self.pre_layrnorm(hidden_states)
+
+ encoder_outputs = self.encoder(
+ inputs_embeds=hidden_states,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ last_hidden_state = encoder_outputs[0]
+ pooled_output = last_hidden_state[:, 0, :]
+ pooled_output = self.post_layernorm(pooled_output)
+
+ if not return_dict:
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=last_hidden_state,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
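+
+ # Illustrative note (follows from the forward pass above): `pooler_output` is
+ # simply the post-layernorm class-token state (`last_hidden_state[:, 0, :]`);
+ # no extra projection head is applied here.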