diff --git a/.gitattributes b/.gitattributes
index aa914d5612c26cde13a2e14a198eb26635baa5dc..df58a4a76c3faf9d977b58a8ceea4f3dade9e9c1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -148,3 +148,6 @@ venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/ptxas filter=lfs d
venv/lib/python3.10/site-packages/torch/bin/protoc filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/torch/bin/protoc-3.13.0.0 filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step20/mp_rank_01_model_states.pt b/ckpts/universal/global_step20/mp_rank_01_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..fc599df260f23a65182ff5f39a51d31ed4e4fce4
--- /dev/null
+++ b/ckpts/universal/global_step20/mp_rank_01_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3054d4175f039469ad55167090ec83f5389bbb93415dfb894c919cf1d0555cf3
+size 4230020
diff --git a/ckpts/universal/global_step20/mp_rank_02_model_states.pt b/ckpts/universal/global_step20/mp_rank_02_model_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..efebbd9ad42034dd328801d256fe689e9a943231
--- /dev/null
+++ b/ckpts/universal/global_step20/mp_rank_02_model_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28bc7d19f509edecc0170ba2ab6c7411a909cff777d8e91fdb2d92928e4d9dae
+size 4230020
diff --git a/ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..584a42a6b998d9e36bb3aec721d53bffc3657ace
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2002b66f9e3a20583660bc8a5d42d749a3e00b8bb7f4587d2a9c913a390d20eb
+size 50332828
diff --git a/ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/fp32.pt b/ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..236c53d02391cdade05373c69b62e751e2dc749a
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/11.attention.query_key_value.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a57df360496dae10c25dd7c1428cd53a02ed910f6deb63a9a06f298b2a3f88b1
+size 50332749
diff --git a/lm-evaluation-harness/tests/testdata/anli_r1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/anli_r1-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..4450c0628e9d9a6f8ff90c9efa0c5e5b1b7e4069
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/anli_r1-v0-loglikelihood
@@ -0,0 +1 @@
+3a84baf2f170e138c6ce0bc9f06f905def35d705fa2b8781f10c87aef404c4cb
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-loglikelihood b/lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..53b28b5b86050168e13400d47dbf169de133d035
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-loglikelihood
@@ -0,0 +1 @@
+8ebbbc510644ede7bf53496c381e276d5a1eec14828870e8b7e611f231e6d5f6
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..154cf9c5946ed829ce7e2f173a2b03554fe789a1
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-loglikelihood
@@ -0,0 +1 @@
+d915830b8621e66331383bb2ae4c60acebf008e2f94741092ef4c33ea5441037
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_adjunct_island-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_adjunct_island-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..85f0e8fb2af3101c8a916368f957ab4968fd132b
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_adjunct_island-v0-loglikelihood
@@ -0,0 +1 @@
+976a5cac4bdb724632eebd4cb9e522203ce3da8d5525288a597c86e80469f3f2
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..07106a905853aad9876257f308e3af5900066253
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_animate_subject_trans-v0-loglikelihood
@@ -0,0 +1 @@
+2a84231e7b79f517427e57e2099c88fed3d60a7efab4ef9506e263b4091d5cfa
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..8970b32aff4c8e5f815453c87bc241e8ca2f01e5
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-loglikelihood
@@ -0,0 +1 @@
+7e1cc5b9f71abfbe56c4bdf343a1e5632785b66a986b8e904a41ed8f45a2c33e
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..f1edb69cb10b150f68b62dad3a18b5248bba95d1
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-loglikelihood
@@ -0,0 +1 @@
+23ddafdff7b1ebe331b146e23b2c21aa109fe57aa1ce8ca201a0d239fcbdd166
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_2-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..bc2dc6e1ed3ad7f38496a2de9610db4d145fc41f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_2-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_determiner_noun_agreement_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_2": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..13176ac613358d8dbdb6031f8220a3dcddac815f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood
@@ -0,0 +1 @@
+ccc64b4d5e80c081d5161aae5828212ba49d277ca8c5a4281f181744727a6a99
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_2-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_2-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..4b1a428c4d32831cc6181054631c723408b8382a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_2-v0-loglikelihood
@@ -0,0 +1 @@
+6e6add7baff4217f383425bef58288202018e041b24084edcaa5df8af08f820c
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_inchoative-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_inchoative-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..8d1b39c2d44fc9651099252fbb4c5d4e37c4668d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_inchoative-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_inchoative": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_inchoative": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..f325c2e3e34f2d07f90e32517bf236339bd63b48
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_principle_A_case_1-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_principle_A_case_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_case_1": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..6d64b97e20bb4688afca5e708f7fc41243ecca14
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_2-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_regular_plural_subject_verb_agreement_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_regular_plural_subject_verb_agreement_2": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..b7d2819cb3b61b90bd5efee98e890b486fc02f39
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood
@@ -0,0 +1 @@
+8a01f6a5ea87a01c0c9b0c7b3bc4de4711bf0ff050976976651182b9ed34a0d4
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/boolq-v1-loglikelihood b/lm-evaluation-harness/tests/testdata/boolq-v1-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..7811121c9fda0c7ec33c2c36639c8ed8febccb05
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/boolq-v1-loglikelihood
@@ -0,0 +1 @@
+6577e0d88572772ef08e64f624c0e3df0953286ae1f118ccef15623b59ffeabf
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/cb-v0-res.json b/lm-evaluation-harness/tests/testdata/cb-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..ba386fd6c7e67c5048d2f4a4240e1b308dca7db5
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/cb-v0-res.json
@@ -0,0 +1 @@
+{"results": {"cb": {"acc": 0.3392857142857143, "acc_stderr": 0.06384226561930825, "f1": 0.2819143819143819}}, "versions": {"cb": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/cola-v0-res.json b/lm-evaluation-harness/tests/testdata/cola-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..462e5d9401318226da067adcc39b27a09157a127
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/cola-v0-res.json
@@ -0,0 +1 @@
+{"results": {"cola": {"mcc": -0.04538802810223175, "mcc_stderr": 0.023100371589225246}}, "versions": {"cola": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_english-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..c4210f5f11540d44476cdf99252e9268ca85a6e0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english": {"likelihood_difference": 0.3367363060632734, "likelihood_difference_stderr": 0.005827747024053628, "pct_stereotype": 0.5062611806797853, "pct_stereotype_stderr": 0.012212341600228745}}, "versions": {"crows_pairs_english": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_disability-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_disability-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..50c7b025631010289ee73762c8f493d8888122d3
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_disability-v0-loglikelihood
@@ -0,0 +1 @@
+90c1bcfdeec0ff51d891ee8cf00ae2a5ec61bab6739faea9865809b8ffed2cdb
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..fedfdac52d966f6edcdb229456858da1959b24d1
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-loglikelihood
@@ -0,0 +1 @@
+d1823f5038afafa7a5338e42531720480c8ccf4e177789526caf294d52d56e89
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..9feec03298368b126f4c7361084fb894b8170ffd
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-loglikelihood
@@ -0,0 +1 @@
+0a750596d77cd96502dc414ff699a399b1b91c2078adeec1d3dd982b3d591089
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..77195255653eaebf9f1d542df02b9720c1f37df8
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french": {"likelihood_difference": 0.3367363060632734, "likelihood_difference_stderr": 0.005827747024053628, "pct_stereotype": 0.5062611806797853, "pct_stereotype_stderr": 0.012212341600228745}}, "versions": {"crows_pairs_french": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..44d8ff96e413cf6eb458a896d47321a0f3996b70
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_autre-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french_autre": {"likelihood_difference": 0.3517045997290783, "likelihood_difference_stderr": 0.07647821858130377, "pct_stereotype": 0.23076923076923078, "pct_stereotype_stderr": 0.12162606385262997}}, "versions": {"crows_pairs_french_autre": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..eea3efa006503d2062660ae0e0625c85b4196899
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french_physical_appearance": {"likelihood_difference": 0.3221673223187262, "likelihood_difference_stderr": 0.026978346460100555, "pct_stereotype": 0.4027777777777778, "pct_stereotype_stderr": 0.05820650942569533}}, "versions": {"crows_pairs_french_physical_appearance": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..d3626ccf80f233702478886fffeede1f587ad2fb
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-astronomy-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-astronomy": {"acc": 0.2565789473684211, "acc_norm": 0.29605263157894735, "acc_norm_stderr": 0.03715062154998904, "acc_stderr": 0.0355418036802569}}, "versions": {"hendrycksTest-astronomy": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..86f54245d557e0091d1166b7ffb2029520e566e9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-loglikelihood
@@ -0,0 +1 @@
+fbcb7ce507e0675d811e71e10a67c8d05a6605e29036f46776e04a6588cefbda
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..6705b9cad27c7f1eb647b513861646faaccad584
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-college_biology": {"acc": 0.24305555555555555, "acc_norm": 0.2361111111111111, "acc_norm_stderr": 0.03551446610810826, "acc_stderr": 0.03586879280080341}}, "versions": {"hendrycksTest-college_biology": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_physics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_physics-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..97e56f2ae62e6b0012d49c6a7a55614a6d6eaf58
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_physics-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-college_physics": {"acc": 0.23529411764705882, "acc_norm": 0.23529411764705882, "acc_norm_stderr": 0.04220773659171453, "acc_stderr": 0.04220773659171452}}, "versions": {"hendrycksTest-college_physics": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-computer_security-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-computer_security-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..60f02eba9cb04602d8b67d67269d8b82e0930721
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-computer_security-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-computer_security": {"acc": 0.24, "acc_norm": 0.27, "acc_norm_stderr": 0.044619604333847394, "acc_stderr": 0.042923469599092816}}, "versions": {"hendrycksTest-computer_security": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-formal_logic-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-formal_logic-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..ef6bec3f70adb9b8df43583cf76e6cd865831b0b
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-formal_logic-v0-loglikelihood
@@ -0,0 +1 @@
+c0d0f0c008a5f3faf2f6f4268d87bbc09c40bb66ae08cf38eea0bf2e519c5a59
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_european_history-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_european_history-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..eec5858ef9a22ba66ee0627646b5ce98f2b0326d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_european_history-v0-loglikelihood
@@ -0,0 +1 @@
+d8070e113be9d420fef5578cb69c70df4ea5118f9b18553023fd9efd5ff0b7f4
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..49a780bc97953db32716ccc580390c5d21cfc252
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_physics-v0-loglikelihood
@@ -0,0 +1 @@
+dae59e82d3d4d8dec82239d9620b72cc47bb6efbe2f1c2f9b9d23e849c9c5e32
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-human_aging-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-human_aging-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..d34fa529800590ecc8e199fdb9d141c99b8c6876
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-human_aging-v0-loglikelihood
@@ -0,0 +1 @@
+0880b3a78f8d7b17ffc612031427b9085367cf65dabe2a68c4b64e3171d17e88
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-management-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-management-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..7a84623fabf793b7748d34c18f4c358649f31a97
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-management-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-management": {"acc": 0.24271844660194175, "acc_norm": 0.2621359223300971, "acc_norm_stderr": 0.043546310772605956, "acc_stderr": 0.04245022486384495}}, "versions": {"hendrycksTest-management": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..5c7859eb3a80a849deee7d67d37f71a84c8eeaf6
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-miscellaneous-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-miscellaneous": {"acc": 0.23499361430395913, "acc_norm": 0.2515964240102171, "acc_norm_stderr": 0.015517322365529622, "acc_stderr": 0.015162024152278445}}, "versions": {"hendrycksTest-miscellaneous": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..3ea8ef0a0e3ddf5cc42c6305e1885e163399f38c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-loglikelihood
@@ -0,0 +1 @@
+a419204da36c2b7a70fa8909a3a804260cc3283c7e07917534dfb76216c77f46
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-security_studies-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-security_studies-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..6aa9b5ec005a326616b812b816b95329ad9349a2
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-security_studies-v0-loglikelihood
@@ -0,0 +1 @@
+92dfffe2acf3278256486d3e1cf1edb5a739ad0a54c0f9c67695f7a411ed5f76
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..efd450a8f2a4ca067f7380af809fdda48d1ee465
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada-v0-loglikelihood
@@ -0,0 +1 @@
+6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..3c444f66611959e4c13451d306fba403261ecfbb
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_fr-v0-loglikelihood
@@ -0,0 +1 @@
+5d16f4a0c51dc6d7b6df2ebeba2bbfa51e700b843779b559b3d90183d7b02a11
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..0a7b76241f898374a3a75952e16fe15af9a6d48e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-loglikelihood
@@ -0,0 +1 @@
+b604f00bc9f2a77ef41f8cfdb5a8509b3ae9266893b9e90abc665f5399ecba4e
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_geometry-v1-greedy_until b/lm-evaluation-harness/tests/testdata/math_geometry-v1-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..1c7362fe44e4432f56f18932b4b429d5cf573399
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_geometry-v1-greedy_until
@@ -0,0 +1 @@
+46bc4cb219b6903397da782699a684bdbb982c0c954ff82e6beeed5c84878f42
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_prealgebra-v0-res.json b/lm-evaluation-harness/tests/testdata/math_prealgebra-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..b3ada8a6be4d86b71a0c6b92c605d3c8a25a29a2
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_prealgebra-v0-res.json
@@ -0,0 +1 @@
+{"results": {"math_prealgebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_prealgebra": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/mc_taco-v0-res.json b/lm-evaluation-harness/tests/testdata/mc_taco-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..fc36d1ed3ff4d02330a13eb7431d5413b4c484e5
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/mc_taco-v0-res.json
@@ -0,0 +1 @@
+{"results": {"mc_taco": {"em": 0.07732732732732733, "f1": 0.41600515965511614}}, "versions": {"mc_taco": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/mnli-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/mnli-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..433b76d01094a18991412513044f0933eb0bf3f5
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/mnli-v0-loglikelihood
@@ -0,0 +1 @@
+4fc7b56b8f1e37e38f4a052b227baec2df914c898c3405d3e994726ba4fba976
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-res.json b/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..261deed96275da1af0c8a0616b0af6247cfaf1c0
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-res.json
@@ -0,0 +1 @@
+{"results": {"mnli_mismatched": {"acc": 0.3360455655004068, "acc_stderr": 0.004763973908606819}}, "versions": {"mnli_mismatched": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/mutual_plus-v1-loglikelihood b/lm-evaluation-harness/tests/testdata/mutual_plus-v1-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..f4ba9d37310a19cc7928fd0d599776d8a9da8dba
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/mutual_plus-v1-loglikelihood
@@ -0,0 +1 @@
+b846bb9db109535f59a93d1ce340cf09f68bdf4fed5b8decd168784220fe07fa
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_nih-exporter-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_nih-exporter-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..0e40fc8268a77618471344585bc1a1586fd69e0f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_nih-exporter-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_nih-exporter": {"bits_per_byte": 0.00035193728014978225, "byte_perplexity": 1.0002439740903082, "word_perplexity": 1.0016712202288802}}, "versions": {"pile_nih-exporter": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_pile-cc-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_pile-cc-v0-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..d5369ed3c97838d67c2900cfac4aaeb5881ec884
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_pile-cc-v0-loglikelihood_rolling
@@ -0,0 +1 @@
+731fdef4a43949b179ba0c540148ebc2fa41583dd583ef580dd812076c66a451
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_stackexchange-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_stackexchange-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..76fdd0a6dd2f8ca39611601c5cb514664d5dccbc
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_stackexchange-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_stackexchange": {"bits_per_byte": 0.0002288815898835956, "byte_perplexity": 1.0002289077852733, "word_perplexity": 1.0016993562258851}}, "versions": {"pile_stackexchange": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..2773302990f71e46f7f44f5d2e2b624a52ddb54d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_stackexchange": {"bits_per_byte": 0.0003302063346758449, "byte_perplexity": 1.0002289077852733, "word_perplexity": 1.0016993562258851}}, "versions": {"pile_stackexchange": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..e44bd2762803a9b922febf4fe8bfd459e95174b9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_wikipedia-v0-loglikelihood_rolling
@@ -0,0 +1 @@
+ef9ec0dd408316ca6537228a6812e839f14b30608973081d41efc47c138338da
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/piqa-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/piqa-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..b01b1fe5d8c699f855bff57061d6d63715c7f058
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/piqa-v0-loglikelihood
@@ -0,0 +1 @@
+6048a3a2bb3ad1e6a3d98139618e06b4d7de766edd685bd38837596199c3f69f
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qa4mre_2012-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/qa4mre_2012-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..0e67fac5f7d54c19e42cae4cfc850089c7c61187
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qa4mre_2012-v0-loglikelihood
@@ -0,0 +1 @@
+7e17261820acb365966cb9431d93aec983b14393eaeefbc96e30a11cf58bc6df
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..43243706d9b743cec2965545f3f4436a3e5d7551
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qa4mre_2013-v0-loglikelihood
@@ -0,0 +1 @@
+52fc431e94c67f983e28ebc70cf45e6c14116b0ae77dc1bf22347c705a65d054
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/record-v0-res.json b/lm-evaluation-harness/tests/testdata/record-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..006c381372178097b36bfac48795e6fbdc242b1a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/record-v0-res.json
@@ -0,0 +1 @@
+{"results": {"record": {"em": 0.1521, "em_stderr": 0.0035913575128186616, "f1": 0.1581870634920636, "f1_stderr": 0.0036146895141474576}}, "versions": {"record": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/webqs-v0-res.json b/lm-evaluation-harness/tests/testdata/webqs-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..9f0fdc76cab096c80a87295773054510803ba218
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/webqs-v0-res.json
@@ -0,0 +1 @@
+{"results": {"webqs": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"webqs": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..1b40263f154461098d6ee820bc0d003c03a6962c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-greedy_until
@@ -0,0 +1 @@
+5fc556fa90bca7f1b1396e97e392eac8080b0ad53488358799b8fc0b21a94cb1
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..a6f148661093f5f97c85e96ab2c01da6e40c1547
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-km-en-v0-greedy_until
@@ -0,0 +1 @@
+fb4ec81bb89c70df7e21b43e0e882915b7b71a2a85bb8d4b59e0c7938baaa4c2
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..00c9c742e4d34830628a52aa34cfd7faaa86516c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-ps-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.015192865365105723, "chrf_stderr": 0.00011334541381539086, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ps-en": 0}}
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 b/venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600
new file mode 100644
index 0000000000000000000000000000000000000000..1c14cb612e843ca35fac943a7b646ca97a172313
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e21d6d8ddaa7046449919a1333468e09fa6d549f0f66d9aafd682f9c979ab9a8
+size 2077120
diff --git a/venv/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 b/venv/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0
new file mode 100644
index 0000000000000000000000000000000000000000..183f672e16db7fb231a95df213334e45ca6429c2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be6f24534f1aa85998fb58a61b9e7124281ab539cbde11f0f40d9d25795c8728
+size 43229425
diff --git a/venv/lib/python3.10/site-packages/torch/lib/libtorch_python.so b/venv/lib/python3.10/site-packages/torch/lib/libtorch_python.so
new file mode 100644
index 0000000000000000000000000000000000000000..6a2568203b824075fcebe033c6652269b83e586e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/torch/lib/libtorch_python.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f452fe2cde17aa2cf5d6d8a01ca0e818120cb47efee7161dfa93703a0b02aa1d
+size 25990913
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec3d6966ac419d648a7d50801414c7ece1f7325d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__init__.py
@@ -0,0 +1,63 @@
+# Copyright 2022 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
+
+
+_import_structure = {
+ "configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
+ "tokenization_biogpt": ["BioGptTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_biogpt"] = [
+ "BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "BioGptForCausalLM",
+ "BioGptForTokenClassification",
+ "BioGptForSequenceClassification",
+ "BioGptModel",
+ "BioGptPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
+ from .tokenization_biogpt import BioGptTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_biogpt import (
+ BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ BioGptForCausalLM,
+ BioGptForSequenceClassification,
+ BioGptForTokenClassification,
+ BioGptModel,
+ BioGptPreTrainedModel,
+ )
+
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55a3df92f2593661fbd45fff1f926f1d23593ec0
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/configuration_biogpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/configuration_biogpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..82ceb5c6f98d05fdd34706c48e18b58dce3305a9
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/configuration_biogpt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/convert_biogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/convert_biogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7d266c6905739fd15df898f06a82ace1899ac00b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/convert_biogpt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/modeling_biogpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/modeling_biogpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0895589d1d078474fea5d75a61e246be4ab4d952
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/modeling_biogpt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/tokenization_biogpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/tokenization_biogpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6988c0df6562eceff6a1cb8786be5bb2c975e3c8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/biogpt/__pycache__/tokenization_biogpt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/configuration_biogpt.py b/venv/lib/python3.10/site-packages/transformers/models/biogpt/configuration_biogpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b4155c0aea3bbb20ae2947162440a66336c2db5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/biogpt/configuration_biogpt.py
@@ -0,0 +1,134 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BioGPT model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class BioGptConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`BioGptModel`]. It is used to instantiate a
+ BioGPT model according to the specified arguments, defining the model architecture. Instantiating a configuration
+ with the defaults will yield a similar configuration to that of the BioGPT
+ [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 42384):
+ Vocabulary size of the BioGPT model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`BioGptModel`].
+ hidden_size (`int`, *optional*, defaults to 1024):
+ Dimension of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 4096):
+ Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Scale embeddings by dividing by sqrt(d_model).
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
+ relevant if `config.is_decoder=True`.
+ layerdrop (`float`, *optional*, defaults to 0.0):
+ Please refer to the paper about LayerDrop: https://arxiv.org/abs/1909.11556 for further details
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ bos_token_id (`int`, *optional*, defaults to 0):
+ Beginning of stream token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+
+ Example:
+
+ ```python
+ >>> from transformers import BioGptModel, BioGptConfig
+
+ >>> # Initializing a BioGPT microsoft/biogpt style configuration
+ >>> configuration = BioGptConfig()
+
+ >>> # Initializing a model from the microsoft/biogpt style configuration
+ >>> model = BioGptModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "biogpt"
+
+ def __init__(
+ self,
+ vocab_size=42384,
+ hidden_size=1024,
+ num_hidden_layers=24,
+ num_attention_heads=16,
+ intermediate_size=4096,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=1024,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ scale_embedding=True,
+ use_cache=True,
+ layerdrop=0.0,
+ activation_dropout=0.0,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ self.vocab_size = vocab_size
+ self.max_position_embeddings = max_position_embeddings
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.intermediate_size = intermediate_size
+ self.hidden_act = hidden_act
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.scale_embedding = scale_embedding
+ self.use_cache = use_cache
+ self.layerdrop = layerdrop
+ self.activation_dropout = activation_dropout
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..c930a850462c820a0be1bb3fcee197e3f4571c13
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/biogpt/convert_biogpt_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,292 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import argparse
+import json
+import os
+import re
+import shutil
+
+import torch
+
+from transformers import BioGptConfig, BioGptForCausalLM
+from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
+from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
+from transformers.utils import WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_warning()
+
+json_indent = 2
+
+
+# modified from https://github.com/facebookresearch/fairseq/blob/dd74992d0d143155998e9ed4076826bcea80fb06/fairseq/data/dictionary.py#L18
+class Dictionary:
+ """A mapping from symbols to consecutive integers"""
+
+ def __init__(
+ self,
+ *, # begin keyword-only arguments
+ bos="",
+ pad="",
+ eos="",
+ unk="",
+ extra_special_symbols=None,
+ ):
+ self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
+ self.symbols = []
+ self.count = []
+ self.indices = {}
+ self.bos_index = self.add_symbol(bos)
+ self.pad_index = self.add_symbol(pad)
+ self.eos_index = self.add_symbol(eos)
+ self.unk_index = self.add_symbol(unk)
+ if extra_special_symbols:
+ for s in extra_special_symbols:
+ self.add_symbol(s)
+ self.nspecial = len(self.symbols)
+
+ def __eq__(self, other):
+ return self.indices == other.indices
+
+ def __getitem__(self, idx):
+ if idx < len(self.symbols):
+ return self.symbols[idx]
+ return self.unk_word
+
+ def __len__(self):
+ """Returns the number of symbols in the dictionary"""
+ return len(self.symbols)
+
+ def __contains__(self, sym):
+ return sym in self.indices
+
+ @classmethod
+ def load(cls, f):
+ """Loads the dictionary from a text file with the format:
+
+ ```
+ <symbol0> <count0>
+ <symbol1> <count1>
+ ...
+ ```
+ """
+ d = cls()
+ d.add_from_file(f)
+ return d
+
+ def add_symbol(self, word, n=1, overwrite=False):
+ """Adds a word to the dictionary"""
+ if word in self.indices and not overwrite:
+ idx = self.indices[word]
+ self.count[idx] = self.count[idx] + n
+ return idx
+ else:
+ idx = len(self.symbols)
+ self.indices[word] = idx
+ self.symbols.append(word)
+ self.count.append(n)
+ return idx
+
+ def _load_meta(self, lines):
+ return 0
+
+ def add_from_file(self, f):
+ """
+ Loads a pre-existing dictionary from a text file and adds its symbols to this instance.
+ """
+ if isinstance(f, str):
+ try:
+ with open(f, "r", encoding="utf-8") as fd:
+ self.add_from_file(fd)
+ except FileNotFoundError as fnfe:
+ raise fnfe
+ except UnicodeError:
+ raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
+ return
+
+ lines = f.readlines()
+ indices_start_line = self._load_meta(lines)
+
+ for line in lines[indices_start_line:]:
+ try:
+ line, field = line.rstrip().rsplit(" ", 1)
+ if field == "#fairseq:overwrite":
+ overwrite = True
+ line, field = line.rsplit(" ", 1)
+ else:
+ overwrite = False
+ count = int(field)
+ word = line
+ if word in self and not overwrite:
+ raise RuntimeError(
+ "Duplicate word found when loading Dictionary: '{}'. "
+ "Duplicate words can overwrite earlier ones by adding the "
+ "#fairseq:overwrite flag at the end of the corresponding row "
+ "in the dictionary file. If using the Camembert model, please "
+ "download an updated copy of the model file.".format(word)
+ )
+ self.add_symbol(word, n=count, overwrite=overwrite)
+ except ValueError:
+ raise ValueError("Incorrect dictionary format, expected ' [flags]'")
+
+
+def rewrite_dict_keys(d):
+ # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
+ # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
+ d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
+ keep_keys = "<s> <pad> </s> <unk>".split()
+ # restore the special tokens
+ for k in keep_keys:
+ del d2[f"{k}"]
+ d2[k] = d[k] # restore
+ return d2
+
+
+def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
+ # prep
+ if not os.path.exists(biogpt_checkpoint_path):
+ raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ print(f"Writing results to {pytorch_dump_folder_path}")
+
+ # handle various types of models
+
+ checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
+ if not os.path.isfile(checkpoint_file):
+ raise ValueError(f"path to the file {checkpoint_file} does not exist!")
+ chkpt = torch.load(checkpoint_file, map_location="cpu")
+
+ args = chkpt["cfg"]["model"]
+
+ # dicts
+ dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
+ if not os.path.isfile(dict_file):
+ raise ValueError(f"path to the file {dict_file} does not exist!")
+ src_dict = Dictionary.load(dict_file)
+ src_vocab = rewrite_dict_keys(src_dict.indices)
+ src_vocab_size = len(src_vocab)
+ src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
+ print(f"Generating {src_vocab_file} of {src_vocab_size} records")
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
+
+ # merges_file (bpecodes)
+ bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
+ if not os.path.isfile(bpecodes_file):
+ raise ValueError(f"path to the file {bpecodes_file} does not exist!")
+
+ merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
+ shutil.copyfile(bpecodes_file, merges_file)
+
+ # model config
+ biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
+
+ model_conf = {
+ "activation_dropout": args["activation_dropout"],
+ "architectures": ["BioGptForCausalLM"],
+ "attention_probs_dropout_prob": args["attention_dropout"],
+ "bos_token_id": 0,
+ "eos_token_id": 2,
+ "hidden_act": args["activation_fn"],
+ "hidden_dropout_prob": args["dropout"],
+ "hidden_size": args["decoder_embed_dim"],
+ "initializer_range": 0.02,
+ "intermediate_size": args["decoder_ffn_embed_dim"],
+ "layer_norm_eps": 1e-12,
+ "layerdrop": args["decoder_layerdrop"],
+ "max_position_embeddings": args["max_target_positions"],
+ "model_type": "biogpt",
+ "num_attention_heads": args["decoder_attention_heads"],
+ "num_hidden_layers": args["decoder_layers"],
+ "pad_token_id": 1,
+ "scale_embedding": not args["no_scale_embedding"],
+ "tie_word_embeddings": args["share_decoder_input_output_embed"],
+ "vocab_size": src_vocab_size,
+ }
+
+ # good hparam defaults to start with
+
+ print(f"Generating {biogpt_model_config_file}")
+ with open(biogpt_model_config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
+
+ # tokenizer config
+ biogpt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
+
+ tokenizer_conf = {
+ "bos_token": "",
+ "eos_token": "",
+ "model_max_length": 1024,
+ "pad_token": "",
+ "special_tokens_map_file": None,
+ "tokenizer_class": "BioGptTokenizer",
+ "unk_token": "",
+ }
+
+ print(f"Generating {biogpt_tokenizer_config_file}")
+ with open(biogpt_tokenizer_config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
+
+ # model
+ model_state_dict = chkpt["model"]
+
+ # remove unneeded keys
+ ignore_keys = [
+ "decoder.version",
+ ]
+ for k in ignore_keys:
+ model_state_dict.pop(k, None)
+
+ layer_names = list(model_state_dict.keys())
+ for layer_name in layer_names:
+ if layer_name.endswith("output_projection.weight"):
+ model_state_dict[layer_name.replace("decoder.", "")] = model_state_dict.pop(layer_name)
+ else:
+ model_state_dict[layer_name.replace("decoder", "biogpt")] = model_state_dict.pop(layer_name)
+
+ config = BioGptConfig.from_pretrained(pytorch_dump_folder_path)
+ model_new = BioGptForCausalLM(config)
+
+ # check that it loads ok
+ model_new.load_state_dict(model_state_dict)
+
+ # save
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ print(f"Generating {pytorch_weights_dump_path}")
+ torch.save(model_state_dict, pytorch_weights_dump_path)
+
+ print("Conversion is done!")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--biogpt_checkpoint_path",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
+ " bpecodes, etc."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/modeling_biogpt.py b/venv/lib/python3.10/site-packages/transformers/models/biogpt/modeling_biogpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..30df3e0847a6319acaf3f042eb394bf902b84e8a
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/biogpt/modeling_biogpt.py
@@ -0,0 +1,924 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch BioGPT model."""
+
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ SequenceClassifierOutputWithPast,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_biogpt import BioGptConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/biogpt"
+_CONFIG_FOR_DOC = "BioGptConfig"
+
+
+from ..deprecated._archive_maps import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+# Copied from transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding with OPT->BioGpt
+class BioGptLearnedPositionalEmbedding(nn.Embedding):
+ """
+ This module learns positional embeddings up to a fixed maximum size.
+ """
+
+ def __init__(self, num_embeddings: int, embedding_dim: int):
+ # BioGpt is set up so that if padding_idx is specified, the embedding ids are offset by 2
+ # and num_embeddings is adjusted accordingly. Other models don't have this hack
+ self.offset = 2
+ super().__init__(num_embeddings + self.offset, embedding_dim)
+
+ def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0):
+ """`input_ids_shape` is expected to be [bsz x seqlen]."""
+ attention_mask = attention_mask.long()
+
+ # create positions depending on attention_mask
+ positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1
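+ # worked example: an attention_mask row of [1, 1, 1, 0] has cumsum [1, 2, 3, 3]; multiplying by the mask
+ # and subtracting 1 gives positions [0, 1, 2, -1], so padded slots fall back to -1 before the +2 offset below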
+
+ # cut positions if `past_key_values_length` is > 0
+ positions = positions[:, past_key_values_length:]
+
+ return super().forward(positions + self.offset)
+
+
+# Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BioGpt
+class BioGptAttention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim: int,
+ num_heads: int,
+ dropout: float = 0.0,
+ is_decoder: bool = False,
+ bias: bool = True,
+ is_causal: bool = False,
+ config: Optional[BioGptConfig] = None,
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ self.config = config
+
+ if (self.head_dim * num_heads) != self.embed_dim:
+ raise ValueError(
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
+ f" and `num_heads`: {num_heads})."
+ )
+ self.scaling = self.head_dim**-0.5
+ self.is_decoder = is_decoder
+ self.is_causal = is_causal
+
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ key_value_states: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+ """Input shape: Batch x Time x Channel"""
+
+ # if key_value_states are provided this layer is used as a cross-attention layer
+ # for the decoder
+ is_cross_attention = key_value_states is not None
+
+ bsz, tgt_len, _ = hidden_states.size()
+
+ # get query proj
+ query_states = self.q_proj(hidden_states) * self.scaling
+ # get key, value proj
+ # `past_key_value[0].shape[2] == key_value_states.shape[1]`
+ # is checking that the `sequence_length` of the `past_key_value` is the same as
+ # the provided `key_value_states` to support prefix tuning
+ if (
+ is_cross_attention
+ and past_key_value is not None
+ and past_key_value[0].shape[2] == key_value_states.shape[1]
+ ):
+ # reuse k,v, cross_attentions
+ key_states = past_key_value[0]
+ value_states = past_key_value[1]
+ elif is_cross_attention:
+ # cross_attentions
+ key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
+ value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
+ elif past_key_value is not None:
+ # reuse k, v, self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ else:
+ # self_attention
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
+
+ if self.is_decoder:
+ # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states.
+ # Further calls to cross_attention layer can then reuse all cross-attention
+ # key/value_states (first "if" case)
+ # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of
+ # all previous decoder key/value_states. Further calls to uni-directional self-attention
+ # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
+ # if encoder bi-directional self-attention `past_key_value` is always `None`
+ past_key_value = (key_states, value_states)
+
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
+ key_states = key_states.reshape(*proj_shape)
+ value_states = value_states.reshape(*proj_shape)
+
+ src_len = key_states.size(1)
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
+
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
+ raise ValueError(
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
+ f" {attn_weights.size()}"
+ )
+
+ if attention_mask is not None:
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
+ raise ValueError(
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
+ )
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ if layer_head_mask.size() != (self.num_heads,):
+ raise ValueError(
+ f"Head mask for a single layer should be of size {(self.num_heads,)}, but is"
+ f" {layer_head_mask.size()}"
+ )
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # this operation is a bit awkward, but it's required to
+ # make sure that attn_weights keeps its gradient.
+ # In order to do so, attn_weights have to be reshaped
+ # twice and have to be reused in the following
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
+
+ attn_output = torch.bmm(attn_probs, value_states)
+
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
+ raise ValueError(
+ f"`attn_output` should be of size {(bsz * self.num_heads, tgt_len, self.head_dim)}, but is"
+ f" {attn_output.size()}"
+ )
+
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(1, 2)
+
+ # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be
+ # partitioned across GPUs when using tensor-parallelism.
+ attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim)
+
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped, past_key_value
+
+
+class BioGptDecoderLayer(nn.Module):
+ def __init__(self, config: BioGptConfig):
+ super().__init__()
+ self.embed_dim = config.hidden_size
+
+ self.self_attn = BioGptAttention(
+ embed_dim=self.embed_dim,
+ num_heads=config.num_attention_heads,
+ dropout=config.attention_probs_dropout_prob,
+ is_decoder=True,
+ )
+ self.dropout = config.hidden_dropout_prob
+ self.activation_fn = ACT2FN[config.hidden_act]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
+ self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)
+ self.final_layer_norm = nn.LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ layer_head_mask: Optional[torch.Tensor] = None,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ output_attentions: Optional[bool] = False,
+ use_cache: Optional[bool] = True,
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
+ """
+ Args:
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
+ attention_mask (`torch.FloatTensor`): attention mask of size
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ `(encoder_attention_heads,)`.
+ past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
+ returned tensors for more detail.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
+ (see `past_key_values`).
+ """
+ residual = hidden_states
+
+ hidden_states = self.self_attn_layer_norm(hidden_states)
+
+ # Self Attention
+ # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
+ self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
+ # add present self-attn cache to positions 1,2 of present_key_value tuple
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
+ hidden_states=hidden_states,
+ past_key_value=self_attn_past_key_value,
+ attention_mask=attention_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ # Fully Connected
+ residual = hidden_states
+ hidden_states = self.final_layer_norm(hidden_states)
+ hidden_states = self.fc1(hidden_states)
+ hidden_states = self.activation_fn(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
+ hidden_states = self.fc2(hidden_states)
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+ hidden_states = residual + hidden_states
+
+ outputs = (hidden_states,)
+
+ if output_attentions:
+ outputs += (self_attn_weights,)
+
+ if use_cache:
+ outputs += (present_key_value,)
+
+ return outputs
+
+
+class BioGptPreTrainedModel(PreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = BioGptConfig
+ base_model_prefix = "biogpt"
+ supports_gradient_checkpointing = True
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+BIOGPT_START_DOCSTRING = r"""
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
+ it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
+ behavior.
+
+ Parameters:
+ config ([`~BioGptConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+BIOGPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
+ Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape
+ `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape
+ `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`.
+
+ Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
+ blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
+
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
+ than the model's internal embedding lookup matrix.
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare BioGPT Model transformer outputting raw hidden-states without any specific head on top.",
+ BIOGPT_START_DOCSTRING,
+)
+class BioGptModel(BioGptPreTrainedModel):
+ def __init__(self, config: BioGptConfig):
+ super().__init__(config)
+ self.config = config
+ self.layerdrop = config.layerdrop
+ self.dropout = config.hidden_dropout_prob
+ self.embed_dim = config.hidden_size
+ self.padding_idx = config.pad_token_id
+ self.embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0
+
+ self.embed_tokens = nn.Embedding(config.vocab_size, self.embed_dim, self.padding_idx)
+ self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim)
+
+ self.layers = nn.ModuleList([BioGptDecoderLayer(config) for _ in range(config.num_hidden_layers)])
+ self.layer_norm = nn.LayerNorm(self.embed_dim)
+
+ self.gradient_checkpointing = False
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.embed_tokens = value
+
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # retrieve input_ids and inputs_embeds
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input = input_ids
+ input_shape = input.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ input = inputs_embeds[:, :, -1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ # past_key_values_length
+ past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0
+
+ if inputs_embeds is None:
+ inputs_embeds = self.embed_tokens(input) * self.embed_scale
+
+ if attention_mask is None:
+ attention_mask = torch.ones(
+ (inputs_embeds.shape[0], inputs_embeds.shape[1] + past_key_values_length),
+ dtype=torch.bool,
+ device=inputs_embeds.device,
+ )
+ elif attention_mask.shape[1] != past_key_values_length + input_shape[1]:
+ raise ValueError(
+ f"The provided attention mask has length {attention_mask.shape[1]}, but its length should be "
+ f"{past_key_values_length + input_shape[1]} (sum of the lengths of current and past inputs)"
+ )
+
+ # embed positions
+ positions = self.embed_positions(attention_mask, past_key_values_length)
+
+ attention_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, input_shape, inputs_embeds, past_key_values_length
+ )
+
+ hidden_states = inputs_embeds + positions
+
+ hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attentions = None
+ next_decoder_cache = () if use_cache else None
+
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ past_key_value = past_key_values[idx] if past_key_values is not None else None
+
+ if self.gradient_checkpointing and self.training:
+ layer_outputs = self._gradient_checkpointing_func(
+ decoder_layer.__call__,
+ hidden_states,
+ attention_mask,
+ head_mask[idx] if head_mask is not None else None,
+ None,
+ output_attentions,
+ use_cache,
+ )
+ else:
+ layer_outputs = decoder_layer(
+ hidden_states,
+ attention_mask=attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ past_key_value=past_key_value,
+ output_attentions=output_attentions,
+ use_cache=use_cache,
+ )
+
+ hidden_states = layer_outputs[0]
+
+ if use_cache:
+ next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
+
+ if output_attentions:
+ all_self_attns += (layer_outputs[1],)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ all_hidden_states += (hidden_states,)
+
+ hidden_states = self.layer_norm(hidden_states)
+
+ next_cache = next_decoder_cache if use_cache else None
+
+ if not return_dict:
+ return tuple(
+ v
+ for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions]
+ if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attentions,
+ )
+
+
+@add_start_docstrings(
+ """BioGPT Model with a `language modeling` head on top for CLM fine-tuning.""", BIOGPT_START_DOCSTRING
+)
+class BioGptForCausalLM(BioGptPreTrainedModel):
+ _tied_weights_keys = ["output_projection.weight"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.biogpt = BioGptModel(config)
+ self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.output_projection
+
+ def set_output_embeddings(self, new_embeddings):
+ self.output_projection = new_embeddings
+
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
+ are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.biogpt(
+ input_ids,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.output_projection(sequence_output)
+
+ lm_loss = None
+ if labels is not None:
+ # we are doing next-token prediction; shift prediction scores and input ids by one
+ shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
+ labels = labels[:, 1:].contiguous()
+ loss_fct = CrossEntropyLoss()
+ lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[1:]
+ return ((lm_loss,) + output) if lm_loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=lm_loss,
+ logits=prediction_scores,
+ past_key_values=outputs.past_key_values,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ cross_attentions=outputs.cross_attentions,
+ )
+
+ def prepare_inputs_for_generation(
+ self, input_ids, attention_mask, inputs_embeds=None, past_key_values=None, **kwargs
+ ):
+ # only last tokens for inputs_ids if past is defined in kwargs
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "attention_mask": attention_mask,
+ "past_key_values": past_key_values,
+ "use_cache": kwargs.get("use_cache"),
+ }
+ )
+
+ return model_inputs
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = ()
+ for layer_past in past_key_values:
+ reordered_past += (
+ tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
+ )
+ return reordered_past
+
+
+@add_start_docstrings(
+ """
+ BioGPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ BIOGPT_START_DOCSTRING,
+)
+class BioGptForTokenClassification(BioGptPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.biogpt = BioGptModel(config)
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
+ classifier_dropout = config.classifier_dropout
+ else:
+ classifier_dropout = config.hidden_dropout_prob
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ token_type_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if `config.num_labels > 1` a
+ classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.biogpt(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = transformer_outputs[0]
+ hidden_states = self.dropout(hidden_states)
+ logits = self.classifier(hidden_states)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # Only keep active parts of the loss
+ if attention_mask is not None:
+ active_loss = attention_mask.view(-1) == 1
+ active_logits = logits.view(-1, self.num_labels)
+ active_labels = torch.where(
+ active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
+ )
+ loss = loss_fct(active_logits, active_labels)
+ else:
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The BioGpt Model transformer with a sequence classification head on top (linear layer).
+
+ [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-2) do.
+
+ Since it does classification on the last token, it is required to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
+ each row of the batch).
+ """,
+ BIOGPT_START_DOCSTRING,
+)
+class BioGptForSequenceClassification(BioGptPreTrainedModel):
+ def __init__(self, config: BioGptConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.biogpt = BioGptModel(config)
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(BIOGPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.biogpt(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size, sequence_length = input_ids.shape[:2]
+ else:
+ batch_size, sequence_length = inputs_embeds.shape[:2]
+
+ if self.config.pad_token_id is None:
+ sequence_length = -1
+ else:
+ if input_ids is not None:
+ sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
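+ # worked example: with pad_token_id = 1, a row [5, 6, 7, 1, 1] has 3 non-padding tokens, so index 2
+ # (the last non-padding position) is selected below when pooling the logits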
+ else:
+ sequence_length = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.biogpt.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.biogpt.embed_tokens = value
diff --git a/venv/lib/python3.10/site-packages/transformers/models/biogpt/tokenization_biogpt.py b/venv/lib/python3.10/site-packages/transformers/models/biogpt/tokenization_biogpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..e16742ec5aa4f0eb2be900aac4c74bb1221761cc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/biogpt/tokenization_biogpt.py
@@ -0,0 +1,357 @@
+# coding=utf-8
+# Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for BioGPT."""
+import json
+import os
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "vocab_file": "vocab.json",
+ "merges_file": "merges.txt",
+}
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
+ strings)
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
+
+
+class BioGptTokenizer(PreTrainedTokenizer):
+ """
+ Construct a FAIRSEQ Transformer tokenizer. Moses tokenization followed by Byte-Pair Encoding.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ merges_file (`str`):
+ Merges file.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ merges_file,
+ unk_token="",
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ pad_token="",
+ **kwargs,
+ ):
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use BioGptTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.lang = "en"
+ self.sm = sacremoses
+ # cache of sm.MosesTokenizer instance
+ self.cache_moses_tokenizer = {}
+ self.cache_moses_detokenizer = {}
+
+ """ Initialisation"""
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
+ self.encoder = json.load(vocab_handle)
+ self.decoder = {v: k for k, v in self.encoder.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:2]) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+
+ super().__init__(
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ **kwargs,
+ )
+
+ @property
+ def vocab_size(self):
+ """Returns vocab size"""
+ return len(self.encoder)
+
+ def get_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def moses_tokenize(self, text, lang):
+ if lang not in self.cache_moses_tokenizer:
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
+ return self.cache_moses_tokenizer[lang].tokenize(
+ text, aggressive_dash_splits=True, return_str=False, escape=True
+ )
+
+ def moses_detokenize(self, tokens, lang):
+ if lang not in self.cache_moses_detokenizer:
+ moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
+ self.cache_moses_detokenizer[lang] = moses_detokenizer
+ return self.cache_moses_detokenizer[lang].detokenize(tokens)
+
+ def bpe(self, token):
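+ # Illustrative note: returns the token split into space-separated BPE units, with "</w>" marking the end
+ # of the word (e.g. a hypothetical merge table might map "lower" to "low er</w>")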
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
+ if token in self.cache:
+ return self.cache[token]
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token + ""
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ if word == "\n ":
+ word = "\n"
+ self.cache[token] = word
+ return word
+
+ def _tokenize(self, text, bypass_tokenizer=False):
+ """Returns a tokenized string."""
+ if bypass_tokenizer:
+ text = text.split()
+ else:
+ text = self.moses_tokenize(text, self.lang)
+
+ split_tokens = []
+ for token in text:
+ if token:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ # remove BPE
+ tokens = [t.replace(" ", "").replace("", " ") for t in tokens]
+ tokens = "".join(tokens).split()
+ # detokenize
+ text = self.moses_detokenize(tokens, self.lang)
+ return text
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. A BioGPT sequence has the following format:
+
+ - single sequence: `</s> X `
+ - pair of sequences: `</s> A </s> B `
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.sep_token_id] + token_ids_0
+ sep = [self.sep_token_id]
+ return sep + token_ids_0 + sep + token_ids_1
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+ # no bos used in fairseq
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
+ return [1] + ([0] * len(token_ids_0))
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
+ Transformer sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+
+ # no bos used in fairseq
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ merge_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merge_file, "w", encoding="utf-8") as writer:
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return vocab_file, merge_file
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sm"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use XLMTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..65aba047469da14c6b25523fba31432e823ec47d
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__init__.py
@@ -0,0 +1,49 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig"],
+ "tokenization_fsmt": ["FSMTTokenizer"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_fsmt"] = ["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]
+
+
+if TYPE_CHECKING:
+ from .configuration_fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig
+ from .tokenization_fsmt import FSMTTokenizer
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9532ced34b799bb8094fb6df0e4ab91cbf851744
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/modeling_fsmt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py b/venv/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..68abe47c019abaae981eb0beedcdbb7c755dff2e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fsmt/configuration_fsmt.py
@@ -0,0 +1,219 @@
+# coding=utf-8
+# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" FSMT configuration"""
+
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class DecoderConfig(PretrainedConfig):
+ r"""
+ Configuration class for FSMT's decoder-specific parameters. Note: this is a private helper class.
+ """
+
+ model_type = "fsmt_decoder"
+
+ def __init__(self, vocab_size=0, bos_token_id=0):
+ super().__init__()
+ self.vocab_size = vocab_size
+ self.bos_token_id = bos_token_id
+
+
+class FSMTConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to that of the FSMT
+ [facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ langs (`List[str]`):
+ A list with source language and target_language (e.g., ['en', 'ru']).
+ src_vocab_size (`int`):
+ Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed to the forward method in the encoder.
+ tgt_vocab_size (`int`):
+ Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed to the forward method in the decoder.
+ d_model (`int`, *optional*, defaults to 1024):
+ Dimensionality of the layers and the pooler layer.
+ encoder_layers (`int`, *optional*, defaults to 12):
+ Number of encoder layers.
+ decoder_layers (`int`, *optional*, defaults to 12):
+ Number of decoder layers.
+ encoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ decoder_attention_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer decoder.
+ decoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ encoder_ffn_dim (`int`, *optional*, defaults to 4096):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
+ activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ dropout (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for the attention probabilities.
+ activation_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout ratio for activations inside the fully connected layer.
+ max_position_embeddings (`int`, *optional*, defaults to 1024):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ init_std (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ scale_embedding (`bool`, *optional*, defaults to `True`):
+ Scale embeddings by dividing by sqrt(d_model).
+ bos_token_id (`int`, *optional*, defaults to 0):
+ Beginning of stream token id.
+ pad_token_id (`int`, *optional*, defaults to 1):
+ Padding token id.
+ eos_token_id (`int`, *optional*, defaults to 2):
+ End of stream token id.
+ decoder_start_token_id (`int`, *optional*):
+ This model starts decoding with `eos_token_id`.
+ encoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the encoder (see https://arxiv.org/abs/1909.11556 for details).
+ decoder_layerdrop (`float`, *optional*, defaults to 0.0):
+ The LayerDrop probability for the decoder (see https://arxiv.org/abs/1909.11556 for details).
+ is_encoder_decoder (`bool`, *optional*, defaults to `True`):
+ Whether this is an encoder/decoder model.
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+ Whether to tie input and output embeddings.
+ num_beams (`int`, *optional*, defaults to 5):
+ Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
+ no beam search.
+ length_penalty (`float`, *optional*, defaults to 1):
+ Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
+ the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
+ likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
+ `length_penalty` < 0.0 encourages shorter sequences.
+ early_stopping (`bool`, *optional*, defaults to `False`):
+ Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
+ when at least `num_beams` sentences are finished per batch or not.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ forced_eos_token_id (`int`, *optional*, defaults to 2):
+ The id of the token to force as the last generated token when `max_length` is reached. Usually set to
+ `eos_token_id`.
+
+ Examples:
+
+ ```python
+ >>> from transformers import FSMTConfig, FSMTModel
+
+ >>> # Initializing a FSMT facebook/wmt19-en-ru style configuration
+ >>> config = FSMTConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = FSMTModel(config)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "fsmt"
+ attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
+
+ # update the defaults from config file
+ def __init__(
+ self,
+ langs=["en", "de"],
+ src_vocab_size=42024,
+ tgt_vocab_size=42024,
+ activation_function="relu",
+ d_model=1024,
+ max_length=200,
+ max_position_embeddings=1024,
+ encoder_ffn_dim=4096,
+ encoder_layers=12,
+ encoder_attention_heads=16,
+ encoder_layerdrop=0.0,
+ decoder_ffn_dim=4096,
+ decoder_layers=12,
+ decoder_attention_heads=16,
+ decoder_layerdrop=0.0,
+ attention_dropout=0.0,
+ dropout=0.1,
+ activation_dropout=0.0,
+ init_std=0.02,
+ decoder_start_token_id=2,
+ is_encoder_decoder=True,
+ scale_embedding=True,
+ tie_word_embeddings=False,
+ num_beams=5,
+ length_penalty=1.0,
+ early_stopping=False,
+ use_cache=True,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ forced_eos_token_id=2,
+ **common_kwargs,
+ ):
+ self.langs = langs
+ self.src_vocab_size = src_vocab_size
+ self.tgt_vocab_size = tgt_vocab_size
+ self.d_model = d_model # encoder_embed_dim and decoder_embed_dim
+
+ self.encoder_ffn_dim = encoder_ffn_dim
+ self.encoder_layers = self.num_hidden_layers = encoder_layers
+ self.encoder_attention_heads = encoder_attention_heads
+ self.encoder_layerdrop = encoder_layerdrop
+ self.decoder_layerdrop = decoder_layerdrop
+ self.decoder_ffn_dim = decoder_ffn_dim
+ self.decoder_layers = decoder_layers
+ self.decoder_attention_heads = decoder_attention_heads
+ self.max_position_embeddings = max_position_embeddings
+ self.init_std = init_std # Normal(0, this parameter)
+ self.activation_function = activation_function
+
+ self.decoder = DecoderConfig(vocab_size=tgt_vocab_size, bos_token_id=eos_token_id)
+ if "decoder" in common_kwargs:
+ del common_kwargs["decoder"]
+
+ self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
+
+ # 3 Types of Dropout
+ self.attention_dropout = attention_dropout
+ self.activation_dropout = activation_dropout
+ self.dropout = dropout
+
+ self.use_cache = use_cache
+ super().__init__(
+ pad_token_id=pad_token_id,
+ bos_token_id=bos_token_id,
+ eos_token_id=eos_token_id,
+ decoder_start_token_id=decoder_start_token_id,
+ is_encoder_decoder=is_encoder_decoder,
+ tie_word_embeddings=tie_word_embeddings,
+ forced_eos_token_id=forced_eos_token_id,
+ max_length=max_length,
+ num_beams=num_beams,
+ length_penalty=length_penalty,
+ early_stopping=early_stopping,
+ **common_kwargs,
+ )
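+
+
+# Example (illustrative): thanks to `attribute_map` above, the generic attribute names used
+# elsewhere in the library resolve to the FSMT-specific ones, roughly:
+#
+# config = FSMTConfig(encoder_attention_heads=8, d_model=512)
+# config.num_attention_heads # -> 8 (maps to `encoder_attention_heads`)
+# config.hidden_size # -> 512 (maps to `d_model`)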
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef2764f0ed10bace714f42f5f74ea6d9a147c613
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fsmt/convert_fsmt_original_pytorch_checkpoint_to_pytorch.py
@@ -0,0 +1,280 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Note: if you intend to run this script make sure you look under scripts/fsmt/
+# to locate the appropriate script to do the work correctly. There is a set of scripts to:
+# - download and prepare data and run the conversion script
+# - perform eval to get the best hparam into the config
+# - generate model_cards - useful if you have multiple models from the same paper
+
+import argparse
+import json
+import os
+import re
+from collections import OrderedDict
+from os.path import basename, dirname
+
+import fairseq
+import torch
+from fairseq import hub_utils
+from fairseq.data.dictionary import Dictionary
+
+from transformers import FSMTConfig, FSMTForConditionalGeneration
+from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
+from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
+from transformers.utils import WEIGHTS_NAME, logging
+
+
+logging.set_verbosity_warning()
+
+json_indent = 2
+
+# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
+# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
+#
+# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
+# * `early_stopping`: `False` consistently scored better
+# * `length_penalty` varied, so will assign the best one depending on the model
+best_score_hparams = {
+ # fairseq:
+ "wmt19-ru-en": {"length_penalty": 1.1},
+ "wmt19-en-ru": {"length_penalty": 1.15},
+ "wmt19-en-de": {"length_penalty": 1.0},
+ "wmt19-de-en": {"length_penalty": 1.1},
+ # allenai:
+ "wmt16-en-de-dist-12-1": {"length_penalty": 0.6},
+ "wmt16-en-de-dist-6-1": {"length_penalty": 0.6},
+ "wmt16-en-de-12-1": {"length_penalty": 0.8},
+ "wmt19-de-en-6-6-base": {"length_penalty": 0.6},
+ "wmt19-de-en-6-6-big": {"length_penalty": 0.6},
+}
+
+# this remaps the different models to their organization names
+org_names = {}
+for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
+ org_names[m] = "facebook"
+for m in [
+ "wmt16-en-de-dist-12-1",
+ "wmt16-en-de-dist-6-1",
+ "wmt16-en-de-12-1",
+ "wmt19-de-en-6-6-base",
+ "wmt19-de-en-6-6-big",
+]:
+ org_names[m] = "allenai"
+
+
+def rewrite_dict_keys(d):
+ # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
+ # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
+ d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
+ keep_keys = "<s> <pad> </s> <unk>".split()
+ # restore the special tokens
+ for k in keep_keys:
+ del d2[f"{k}</w>"]
+ d2[k] = d[k] # restore
+ return d2
+
+
+def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
+ # prep
+ assert os.path.exists(fsmt_checkpoint_path)
+ os.makedirs(pytorch_dump_folder_path, exist_ok=True)
+ print(f"Writing results to {pytorch_dump_folder_path}")
+
+ # handle various types of models
+
+ checkpoint_file = basename(fsmt_checkpoint_path)
+ fsmt_folder_path = dirname(fsmt_checkpoint_path)
+
+ cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
+ models = cls.hub_models()
+ kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
+ data_name_or_path = "."
+ # note: the model dump is old and fairseq has since upgraded its model format; on load it
+ # performs a whole lot of rewrites and splits on the saved weights, therefore we can't use
+ # torch.load() directly on the model file.
+ # see: upgrade_state_dict(state_dict) in fairseq_model.py
+ print(f"using checkpoint {checkpoint_file}")
+ chkpt = hub_utils.from_pretrained(
+ fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
+ )
+
+ args = vars(chkpt["args"]["model"])
+
+ src_lang = args["source_lang"]
+ tgt_lang = args["target_lang"]
+
+ data_root = dirname(pytorch_dump_folder_path)
+ model_dir = basename(pytorch_dump_folder_path)
+
+ # dicts
+ src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
+ tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")
+
+ src_dict = Dictionary.load(src_dict_file)
+ src_vocab = rewrite_dict_keys(src_dict.indices)
+ src_vocab_size = len(src_vocab)
+ src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
+ print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))
+
+ # detect whether this is a do_lower_case situation, which can be derived by checking whether we
+ # have at least one uppercase letter in the source vocab
+ do_lower_case = True
+ for k in src_vocab.keys():
+ if not k.islower():
+ do_lower_case = False
+ break
+
+ tgt_dict = Dictionary.load(tgt_dict_file)
+ tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
+ tgt_vocab_size = len(tgt_vocab)
+ tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
+ print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
+ with open(tgt_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))
+
+ # merges_file (bpecodes)
+ merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
+ for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
+ fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
+ if os.path.exists(fsmt_merges_file):
+ break
+ with open(fsmt_merges_file, encoding="utf-8") as fin:
+ merges = fin.read()
+ merges = re.sub(r" \d+$", "", merges, 0, re.M) # remove frequency number
+ print(f"Generating {merges_file}")
+ with open(merges_file, "w", encoding="utf-8") as fout:
+ fout.write(merges)
+
+ # model config
+ fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
+
+ # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
+ # may have to modify the tokenizer if a different type is used by a future model
+ assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
+ assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"
+
+ model_conf = {
+ "architectures": ["FSMTForConditionalGeneration"],
+ "model_type": "fsmt",
+ "activation_dropout": args["activation_dropout"],
+ "activation_function": "relu",
+ "attention_dropout": args["attention_dropout"],
+ "d_model": args["decoder_embed_dim"],
+ "dropout": args["dropout"],
+ "init_std": 0.02,
+ "max_position_embeddings": args["max_source_positions"],
+ "num_hidden_layers": args["encoder_layers"],
+ "src_vocab_size": src_vocab_size,
+ "tgt_vocab_size": tgt_vocab_size,
+ "langs": [src_lang, tgt_lang],
+ "encoder_attention_heads": args["encoder_attention_heads"],
+ "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
+ "encoder_layerdrop": args["encoder_layerdrop"],
+ "encoder_layers": args["encoder_layers"],
+ "decoder_attention_heads": args["decoder_attention_heads"],
+ "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
+ "decoder_layerdrop": args["decoder_layerdrop"],
+ "decoder_layers": args["decoder_layers"],
+ "bos_token_id": 0,
+ "pad_token_id": 1,
+ "eos_token_id": 2,
+ "is_encoder_decoder": True,
+ "scale_embedding": not args["no_scale_embedding"],
+ "tie_word_embeddings": args["share_all_embeddings"],
+ }
+
+ # good hparam defaults to start with
+ model_conf["num_beams"] = 5
+ model_conf["early_stopping"] = False
+ if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
+ model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
+ else:
+ model_conf["length_penalty"] = 1.0
+
+ print(f"Generating {fsmt_model_config_file}")
+ with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))
+
+ # tokenizer config
+ fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)
+
+ tokenizer_conf = {
+ "langs": [src_lang, tgt_lang],
+ "model_max_length": 1024,
+ "do_lower_case": do_lower_case,
+ }
+
+ print(f"Generating {fsmt_tokenizer_config_file}")
+ with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))
+
+ # model
+ model = chkpt["models"][0]
+ model_state_dict = model.state_dict()
+
+ # rename keys to start with 'model.'
+ model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())
+
+ # remove unneeded keys
+ ignore_keys = [
+ "model.model",
+ "model.encoder.version",
+ "model.decoder.version",
+ "model.encoder_embed_tokens.weight",
+ "model.decoder_embed_tokens.weight",
+ "model.encoder.embed_positions._float_tensor",
+ "model.decoder.embed_positions._float_tensor",
+ ]
+ for k in ignore_keys:
+ model_state_dict.pop(k, None)
+
+ config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
+ model_new = FSMTForConditionalGeneration(config)
+
+ # check that it loads ok
+ model_new.load_state_dict(model_state_dict, strict=False)
+
+ # save
+ pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
+ print(f"Generating {pytorch_weights_dump_path}")
+ torch.save(model_state_dict, pytorch_weights_dump_path)
+
+ print("Conversion is done!")
+ print("\nLast step is to upload the files to s3")
+ print(f"cd {data_root}")
+ print(f"transformers-cli upload {model_dir}")
+
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser()
+ # Required parameters
+ parser.add_argument(
+ "--fsmt_checkpoint_path",
+ default=None,
+ type=str,
+ required=True,
+ help=(
+ "Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"
+ " bpecodes, etc."
+ ),
+ )
+ parser.add_argument(
+ "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
+ )
+ args = parser.parse_args()
+ convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
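+
+
+# Example invocation (illustrative; the checkpoint path and dump folder below are placeholders
+# and must point to a fairseq dump directory containing the checkpoint, dicts and bpecodes):
+#
+# python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
+# --fsmt_checkpoint_path /path/to/fsmt/checkpoint.pt \
+# --pytorch_dump_folder_path /path/to/output/wmt19-ru-en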
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py b/venv/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c180c52678b82b80c5a1aa43f42292556c8f1e4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fsmt/modeling_fsmt.py
@@ -0,0 +1,1386 @@
+# coding=utf-8
+# Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Original implementation: https://github.com/pytorch/fairseq/tree/master/examples/wmt19
+# Authors:
+# - @alexeib Alexei Baevski
+# - @edunov Sergey Edunov
+# - @michaelauli Michael Auli
+# - @myleott Myle Ott
+# - @nng555 Nathan Ng
+# - David Grangier
+# - Kyra Yee
+#
+# Paper: Facebook FAIR's WMT19 News Translation Task Submission https://arxiv.org/abs/1907.06616
+#
+"""PyTorch Fairseq model, ported from https://github.com/pytorch/fairseq/tree/master/examples/wmt19"""
+
+import math
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import torch
+from torch import Tensor, nn
+from torch.nn import CrossEntropyLoss, LayerNorm
+
+from ...activations import ACT2FN
+from ...integrations.deepspeed import is_deepspeed_zero3_enabled
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPastAndCrossAttentions,
+ Seq2SeqLMOutput,
+ Seq2SeqModelOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import (
+ add_code_sample_docstrings,
+ add_end_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+ replace_return_docstrings,
+)
+from .configuration_fsmt import FSMTConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "facebook/wmt19-ru-en"
+_CONFIG_FOR_DOC = "FSMTConfig"
+
+# See all FSMT models at https://huggingface.co/models?filter=fsmt
+
+# Porting notes:
+# this one is modeled after BartModel*
+#
+# Currently only translation (fairseq also has weights for LM)
+#
+# fairseq provides weights for ru-en, en-ru and de-en, en-de pairs. All have been ported.
+# - ru-en, en-ru use asymmetric vocab
+# - de-en, en-de use a merged single vocab (but the code works as if they are separate)
+#
+# Differences with Bart:
+# - not using bos token
+# - 2 separate vocabs (src and target)
+# - embed weights aren't tied
+# - uses a model Ensemble (but that part isn't ported/implemented yet) - so we
+# aren't getting as good of a BLEU score
+# - uses a projection layer at the end of the decoder
+# - doesn't use final_logits_bias
+# - beam search: stops as soon as num_beams == len(hypos) (whereas transformers
+# is not satisfied there and will continue searching until the next cycles
+# aren't promising something better), comparing BLEU scores - the transformers
+# algorithm is slightly superior, therefore using the latter. But if you want
+# to match fairseq outputs, you need to pass ``early_stopping=True`` to ``generate()`` (see the
+# example call after these notes).
+#
+# SinusoidalPositionalEmbedding is slightly different from Bart's - generates
+# different embeddings. This implementation is copied verbatim from fairseq with
+# some small changes to make it work here.
+#
+# Other changes:
+# - doesn't support use_cache as Bart's version does
+#
+#
+# FSMTConfig differences from BartConfig
+#
+# Differences with BART:
+# - src/tgt vocabs aren't shared
+# - token embeddings aren't shared
+# - needs a language pair
+# - scale_embedding is True
+#
+# some unused args were removed too
+#
+#
+# TODO:
+# - port model ensemble (fs uses 4 model checkpoints)
+# - solve beam search discrepancies
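+#
+# For example, to approximate fairseq's beam-search stopping behavior mentioned in the notes
+# above, one would pass `early_stopping=True` when generating (illustrative call, using the model
+# and tokenizer from the generation example docstring further below):
+#
+# outputs = model.generate(input_ids, num_beams=5, early_stopping=True)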
+# docstyle-ignore
+
+"""
+
+Here is how to compare BLEU scores against fairseq implementation:
+
+# en-ru
+
+export PAIR=en-ru
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+echo $PAIR
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+# (fairseq BLEU: 36.4 http://matrix.statmt.org/matrix/output/1914?score_id=37605)
+
+
+# ru-en
+
+export PAIR=ru-en
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+
+# (fairseq BLEU: 41.3 http://matrix.statmt.org/matrix/output/1907?run_id=6937)
+
+
+# de-en
+
+export PAIR=de-en
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+echo $PAIR
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+# (fairseq BLEU: 42.3 http://matrix.statmt.org/matrix/output/1902?run_id=6750)
+
+
+
+# en-de
+
+export PAIR=en-de
+export DATA_DIR=data/$PAIR
+export SAVE_DIR=data/$PAIR
+export BS=8
+export NUM_BEAMS=50
+mkdir -p $DATA_DIR
+sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
+sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
+echo $PAIR
+PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
+
+# (fairseq BLEU: 43.1 http://matrix.statmt.org/matrix/output/1909?run_id=6862)
+
+"""
+
+
+FSMT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`FSMTConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+
+"""
+FSMT_GENERATION_EXAMPLE = r"""
+ Translation example::
+
+ ```python
+ >>> from transformers import AutoTokenizer, FSMTForConditionalGeneration
+
+ >>> mname = "facebook/wmt19-ru-en"
+ >>> model = FSMTForConditionalGeneration.from_pretrained(mname)
+ >>> tokenizer = AutoTokenizer.from_pretrained(mname)
+
+ >>> src_text = "Машинное обучение - это здорово, не так ли?"
+ >>> input_ids = tokenizer(src_text, return_tensors="pt").input_ids
+ >>> outputs = model.generate(input_ids, num_beams=5, num_return_sequences=3)
+ >>> tokenizer.decode(outputs[0], skip_special_tokens=True)
+ "Machine learning is great, isn't it?"
+ ```
+
+"""
+
+FSMT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`FSMTTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Indices of decoder input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are decoder input IDs?](../glossary#decoder-input-ids)
+
+ FSMT uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
+ is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
+ decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
+ Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
+ be used by default.
+ head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in `[0,
+ 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ encoder_outputs (`Tuple(torch.FloatTensor)`, *optional*):
+ Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`)
+ `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)` is a sequence of hidden-states at
+ the output of the last layer of the encoder. Used in the cross-attention of the decoder.
+ past_key_values (`Tuple(torch.FloatTensor)` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
+ Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding.
+ If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
+ don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
+ `decoder_input_ids` of shape `(batch_size, sequence_length)`.
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded
+ representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be
+ input (see `past_key_values`). This is useful if you want more control over how to convert
+ `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix.
+
+ If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value
+ of `inputs_embeds`.
+ use_cache (`bool`, *optional*, defaults to `True`):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+def invert_mask(attention_mask):
+ """Turns 1->0, 0->1, False->True, True-> False"""
+ assert attention_mask.dim() == 2
+ return attention_mask.eq(0)
+
+
+def triu_onnx(x, diagonal=0):
+ l = x.shape[0]
+ arange = torch.arange(l, device=x.device)
+ mask = arange.expand(l, l)
+ arange = arange.unsqueeze(-1)
+ if diagonal:
+ arange = arange + diagonal
+ mask = mask >= arange
+ return x.masked_fill(mask == 0, 0)
+
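+# Illustrative behaviour of triu_onnx (a torch.triu-like helper that traces cleanly for ONNX):
+# for a 3x3 matrix of ones and diagonal=1, only entries with column >= row + 1 are kept:
+#
+# triu_onnx(torch.ones(3, 3), diagonal=1)
+# # tensor([[0., 1., 1.],
+# # [0., 0., 1.],
+# # [0., 0., 0.]])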
+
+def _prepare_fsmt_decoder_inputs(
+ config,
+ input_ids,
+ decoder_input_ids=None,
+ decoder_padding_mask=None,
+ causal_mask_dtype=torch.float32,
+):
+ """
+ Prepare masks that ignore padding tokens in the decoder and a causal mask for the decoder if none are provided.
+ This mimics the default behavior in fairseq. To override it pass in masks. Note: this is not called during
+ generation
+ """
+ pad_token_id = config.pad_token_id
+ if decoder_input_ids is None:
+ decoder_input_ids = shift_tokens_right(input_ids, pad_token_id)
+ bsz, tgt_len = decoder_input_ids.size()
+ if decoder_padding_mask is None:
+ decoder_padding_mask = make_padding_mask(decoder_input_ids, pad_token_id)
+ else:
+ decoder_padding_mask = invert_mask(decoder_padding_mask)
+ causal_mask = triu_onnx(fill_with_neg_inf(torch.zeros(tgt_len, tgt_len, dtype=causal_mask_dtype)), 1).to(
+ device=decoder_input_ids.device
+ )
+ return decoder_input_ids, decoder_padding_mask, causal_mask
+
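+# Rough sketch of the masks produced above (illustrative values, pad_token_id=1): for
+# decoder_input_ids of shape (bsz, 3) the causal mask is a (3, 3) float tensor, roughly
+#
+# [[0., -inf, -inf],
+# [0., 0., -inf],
+# [0., 0., 0.]]
+#
+# and decoder_padding_mask is True exactly at the padding positions (or None if there is no padding).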
+
+class PretrainedFSMTModel(PreTrainedModel):
+ config_class = FSMTConfig
+ base_model_prefix = "model"
+
+ def _init_weights(self, module):
+ std = self.config.init_std
+ if isinstance(module, nn.Linear):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, SinusoidalPositionalEmbedding):
+ pass
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=std)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+
+ @property
+ def dummy_inputs(self):
+ pad_token = self.config.pad_token_id
+ input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device)
+ dummy_inputs = {
+ "attention_mask": input_ids.ne(pad_token),
+ "input_ids": input_ids,
+ }
+ return dummy_inputs
+
+
+def _make_linear_from_emb(emb):
+ vocab_size, emb_size = emb.weight.shape
+ lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
+ lin_layer.weight.data = emb.weight.data
+ return lin_layer
+
+
+# Helper Functions, mostly for making masks
+def _check_shapes(shape_1, shape2):
+ if shape_1 != shape2:
+ raise AssertionError(f"shape mismatch: {shape_1} != {shape2}")
+
+
+def shift_tokens_right(input_ids, pad_token_id):
+ """Shift input ids one token to the right, and wrap the last non pad token (usually )."""
+
+ # replace possible -100 values in labels by `pad_token_id`
+ input_ids.masked_fill_(input_ids == -100, pad_token_id)
+
+ prev_output_tokens = input_ids.clone()
+ index_of_eos = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
+ prev_output_tokens[:, 0] = input_ids.gather(1, index_of_eos).squeeze()
+ prev_output_tokens[:, 1:] = input_ids[:, :-1]
+ return prev_output_tokens
+
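+# Worked example (illustrative ids, pad_token_id=1, eos_token_id=2):
+# input_ids = [[5, 6, 7, 2, 1]]
+# shift_tokens_right(input_ids, 1) -> [[2, 5, 6, 7, 2]]
+# i.e. the last non-pad token (the <eos>) is wrapped around to the first decoder position.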
+
+def make_padding_mask(input_ids, padding_idx=1):
+ """True for pad tokens"""
+ padding_mask = input_ids.eq(padding_idx)
+ if not padding_mask.any():
+ padding_mask = None
+ return padding_mask
+
+
+# Helper Modules
+
+
+class EncoderLayer(nn.Module):
+ def __init__(self, config: FSMTConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+ self.self_attn = Attention(self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout)
+ self.self_attn_layer_norm = LayerNorm(self.embed_dim)
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+ self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim)
+ self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = LayerNorm(self.embed_dim)
+
+ def forward(self, x, encoder_padding_mask, layer_head_mask, output_attentions=False):
+ """
+ Args:
+ x (`torch.Tensor`): input to the layer of shape *(seq_len, batch, embed_dim)*
+ encoder_padding_mask (`torch.ByteTensor`): binary ByteTensor of shape
+ *(batch, src_len)* where padding elements are indicated by `1`. A value of `1` means the
+ corresponding source position is excluded (masked out) from attention, `0` means it is
+ included in attention
+ layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size
+ *(config.encoder_attention_heads,)*.
+
+ Returns:
+ encoded output of shape *(seq_len, batch, embed_dim)*
+ """
+ residual = x
+ x, attn_weights = self.self_attn(
+ query=x,
+ key=x,
+ key_padding_mask=encoder_padding_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.self_attn_layer_norm(x)
+
+ residual = x
+ x = self.activation_fn(self.fc1(x))
+ x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
+ x = self.fc2(x)
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.final_layer_norm(x)
+ return x, attn_weights
+
+
+class FSMTEncoder(nn.Module):
+ """
+ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`EncoderLayer`].
+
+ Args:
+ config: FSMTConfig
+ """
+
+ def __init__(self, config: FSMTConfig, embed_tokens):
+ super().__init__()
+ self.dropout = config.dropout
+ self.layerdrop = config.encoder_layerdrop
+ self.padding_idx = embed_tokens.padding_idx
+ self.embed_tokens = embed_tokens
+ embed_dim = embed_tokens.embedding_dim
+ self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0
+ self.embed_positions = SinusoidalPositionalEmbedding(
+ config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
+ )
+ self.layers = nn.ModuleList([EncoderLayer(config) for _ in range(config.encoder_layers)]) # type: List[EncoderLayer]
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: torch.Tensor = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ """
+ Args:
+ input_ids (`torch.LongTensor`): tokens in the source language of shape
+ *(batch, src_len)*
+ attention_mask (`torch.LongTensor`): indicating which indices are padding tokens
+ inputs_embeds (`torch.FloatTensor`):
+ embedding vectors of shape *(batch, src_len, embed_dim)*
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ Returns:
+ BaseModelOutput or Tuple comprised of:
+
+ - **x** (`torch.Tensor`): the last encoder layer's output of shape *(src_len, batch, embed_dim)*
+ - **encoder_states** (`Tuple(torch.FloatTensor`)): all intermediate hidden states of shape *(src_len,
+ batch, embed_dim)*. Only populated if *output_hidden_states:* is True.
+ - **all_attentions** (`Tuple(torch.FloatTensor`)): Attention weights for each layer.
+ During training might not be of length n_layers because of layer dropout.
+ """
+ # check attention mask and invert
+ if attention_mask is not None:
+ attention_mask = invert_mask(attention_mask)
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
+ embed_pos = self.embed_positions(input_ids)
+ elif inputs_embeds is not None:
+ inputs_embeds = inputs_embeds * self.embed_scale
+
+ # We assume zeros hidden states correspond to padding tokens
+ # and create `position_ids` where inputs_embeds[:, :, 0] == 0
+ position_ids = inputs_embeds[:, :, 0].masked_fill(
+ inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
+ )
+
+ embed_pos = self.embed_positions(position_ids)
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ x = inputs_embeds + embed_pos
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+
+ # B x T x C -> T x B x C
+ x = x.transpose(0, 1)
+
+ encoder_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ # check if head_mask has a correct number of layers specified if desired
+ if head_mask is not None:
+ assert head_mask.size()[0] == (
+ len(self.layers)
+ ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
+ for idx, encoder_layer in enumerate(self.layers):
+ if output_hidden_states:
+ x = x.transpose(0, 1) # T x B x C -> B x T x C
+ encoder_states += (x,)
+ x = x.transpose(0, 1) # B x T x C -> T x B x C
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ dropout_probability = torch.rand([])
+ if self.training and (dropout_probability < self.layerdrop): # skip the layer
+ attn = None
+ else:
+ x, attn = encoder_layer(
+ x,
+ attention_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ if output_attentions:
+ all_attentions = all_attentions + (attn,)
+
+ # T x B x C -> B x T x C
+ x = x.transpose(0, 1)
+
+ if output_hidden_states:
+ encoder_states += (x,)
+
+ if not return_dict:
+ return tuple(v for v in [x, encoder_states, all_attentions] if v is not None)
+ return BaseModelOutput(last_hidden_state=x, hidden_states=encoder_states, attentions=all_attentions)
+
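+# Shape walkthrough for the encoder above (illustrative, batch B=2, source length T=7 and the
+# default d_model of 1024): input_ids (2, 7) -> token + position embeddings (2, 7, 1024) ->
+# transposed to (7, 2, 1024) for the encoder layers -> transposed back, so the returned
+# `last_hidden_state` is (2, 7, 1024).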
+
+class DecoderLayer(nn.Module):
+ def __init__(self, config: FSMTConfig):
+ super().__init__()
+ self.embed_dim = config.d_model
+
+ self.self_attn = Attention(
+ embed_dim=self.embed_dim,
+ num_heads=config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ )
+ self.dropout = config.dropout
+ self.activation_fn = ACT2FN[config.activation_function]
+ self.activation_dropout = config.activation_dropout
+
+ self.self_attn_layer_norm = LayerNorm(self.embed_dim)
+ self.encoder_attn = Attention(
+ self.embed_dim,
+ config.decoder_attention_heads,
+ dropout=config.attention_dropout,
+ encoder_decoder_attention=True,
+ )
+ self.encoder_attn_layer_norm = LayerNorm(self.embed_dim)
+ self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
+ self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
+ self.final_layer_norm = LayerNorm(self.embed_dim)
+
+ def forward(
+ self,
+ x,
+ encoder_hidden_states,
+ encoder_attn_mask=None,
+ layer_state=None,
+ causal_mask=None,
+ layer_head_mask=None,
+ cross_attn_layer_head_mask=None,
+ decoder_padding_mask=None,
+ output_attentions=False,
+ ):
+ residual = x
+
+ if layer_state is None:
+ layer_state = {}
+
+ # Self Attention
+ x, self_attn_weights = self.self_attn(
+ query=x,
+ key=x,
+ layer_state=layer_state, # adds keys to layer state
+ key_padding_mask=decoder_padding_mask,
+ attn_mask=causal_mask,
+ layer_head_mask=layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.self_attn_layer_norm(x)
+
+ # Cross attention
+ residual = x
+ assert self.encoder_attn.cache_key != self.self_attn.cache_key
+ x, cross_attn_weights = self.encoder_attn(
+ query=x,
+ key=encoder_hidden_states,
+ key_padding_mask=encoder_attn_mask,
+ layer_state=layer_state, # mutates layer state
+ layer_head_mask=cross_attn_layer_head_mask,
+ output_attentions=output_attentions,
+ )
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.encoder_attn_layer_norm(x)
+
+ # Fully Connected
+ residual = x
+ x = self.activation_fn(self.fc1(x))
+ x = nn.functional.dropout(x, p=self.activation_dropout, training=self.training)
+ x = self.fc2(x)
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+ x = residual + x
+ x = self.final_layer_norm(x)
+ return (
+ x,
+ self_attn_weights,
+ layer_state,
+ cross_attn_weights,
+ ) # layer_state = cache for decoding
+
+
+class FSMTDecoder(nn.Module):
+ """
+ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DecoderLayer`]
+
+ Args:
+ config: FSMTConfig
+ embed_tokens (nn.Embedding): output embedding
+ """
+
+ def __init__(self, config: FSMTConfig, embed_tokens: nn.Embedding):
+ super().__init__()
+ self.dropout = config.dropout
+ self.layerdrop = config.decoder_layerdrop
+ self.padding_idx = embed_tokens.padding_idx
+ self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0
+ self.embed_tokens = embed_tokens
+ embed_dim = embed_tokens.embedding_dim
+ self.embed_positions = SinusoidalPositionalEmbedding(
+ config.max_position_embeddings + self.padding_idx + 1, embed_dim, self.padding_idx
+ )
+ self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.decoder_layers)]) # type: List[DecoderLayer]
+
+ if is_deepspeed_zero3_enabled():
+ import deepspeed
+
+ with deepspeed.zero.GatheredParameters(self.embed_tokens.weight, modifier_rank=None):
+ embed_tokens_weight_shape = self.embed_tokens.weight.shape
+ else:
+ embed_tokens_weight_shape = self.embed_tokens.weight.shape
+ self.output_projection = nn.Linear(embed_tokens_weight_shape[1], embed_tokens_weight_shape[0], bias=False)
+ self.output_projection.weight = self.embed_tokens.weight
+
+ def forward(
+ self,
+ input_ids: torch.Tensor,
+ encoder_hidden_states: torch.Tensor,
+ encoder_padding_mask: torch.Tensor,
+ decoder_padding_mask: torch.Tensor,
+ decoder_causal_mask: torch.Tensor,
+ head_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
+ use_cache: bool = False,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = True,
+ ):
+ """
+ Includes several features from "Jointly Learning to Align and Translate with Transformer Models" (Garg et al.,
+ EMNLP 2019).
+
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch, tgt_len)`):
+ previous decoder outputs for teacher forcing
+ encoder_hidden_states: output from the encoder, used for
+ encoder-side attention
+ encoder_padding_mask: for ignoring pad tokens
+ past_key_values (dict or None): dictionary used for storing state during generation
+ head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ cross_attn_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ Returns:
+ BaseModelOutputWithPastAndCrossAttentions or tuple:
+
+ - the decoder's features of shape *(batch, tgt_len, embed_dim)*
+ - the cache
+ - hidden states
+ - attentions
+ """
+ # check attention mask and invert
+ if encoder_padding_mask is not None:
+ encoder_padding_mask = invert_mask(encoder_padding_mask)
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time")
+ elif input_ids is not None:
+ # embed positions
+ positions = self.embed_positions(input_ids)
+ if use_cache:
+ input_ids = input_ids[:, -1:]
+ positions = positions[:, -1:] # happens after we embed them
+ x = self.embed_tokens(input_ids) * self.embed_scale
+ elif inputs_embeds is not None:
+ # We assume zeros hidden states correspond to padding tokens
+ # and create `position_ids` where inputs_embeds[:, :, 0] == 0
+ position_ids = inputs_embeds[:, :, 0].masked_fill(
+ inputs_embeds[:, :, 0].eq(0), self.embed_positions.padding_idx
+ )
+ positions = self.embed_positions(position_ids)
+ x = inputs_embeds * self.embed_scale
+ else:
+ raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds")
+
+ x += positions
+ x = nn.functional.dropout(x, p=self.dropout, training=self.training)
+
+ # Convert to FSMT output format: (BS, seq_len, model_dim) -> (seq_len, BS, model_dim)
+ x = x.transpose(0, 1)
+ encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
+
+ # decoder layers
+ all_hidden_states = () if output_hidden_states else None
+ all_self_attns = () if output_attentions else None
+ all_cross_attns = () if output_attentions else None
+ next_decoder_cache = []
+
+ # check if head_mask has a correct number of layers specified if desired
+ for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]):
+ if attn_mask is not None:
+ assert attn_mask.size()[0] == (len(self.layers)), (
+ f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for"
+ f" {head_mask.size()[0]}."
+ )
+ for idx, decoder_layer in enumerate(self.layers):
+ # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
+ if output_hidden_states:
+ x = x.transpose(0, 1)
+ all_hidden_states += (x,)
+ x = x.transpose(0, 1)
+ if self.training:
+ dropout_probability = torch.rand([])
+ if dropout_probability < self.layerdrop:
+ continue
+
+ layer_state = past_key_values[idx] if past_key_values is not None else None
+
+ x, layer_self_attn, layer_past, layer_cross_attn = decoder_layer(
+ x,
+ encoder_hidden_states,
+ encoder_attn_mask=encoder_padding_mask,
+ decoder_padding_mask=decoder_padding_mask,
+ layer_state=layer_state,
+ causal_mask=decoder_causal_mask,
+ layer_head_mask=(head_mask[idx] if head_mask is not None else None),
+ cross_attn_layer_head_mask=(cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None),
+ output_attentions=output_attentions,
+ )
+
+ if use_cache:
+ next_decoder_cache.append(layer_past.copy())
+
+ if output_attentions:
+ all_self_attns += (layer_self_attn,)
+ all_cross_attns += (layer_cross_attn,)
+
+ # add hidden states from the last decoder layer
+ if output_hidden_states:
+ x = x.transpose(0, 1)
+ all_hidden_states += (x,)
+ x = x.transpose(0, 1)
+
+ # Convert to standard output format: (seq_len, BS, model_dim) -> (BS, seq_len, model_dim)
+ x = x.transpose(0, 1)
+ encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
+
+ x = self.output_projection(x)
+
+ next_cache = next_decoder_cache if use_cache else None
+
+ if not return_dict:
+ return tuple(
+ v for v in [x, next_cache, all_hidden_states, all_self_attns, all_cross_attns] if v is not None
+ )
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=x,
+ past_key_values=next_cache,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attns,
+ cross_attentions=all_cross_attns,
+ )
+
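+# Note on the decoder output above (illustrative shapes): `output_projection` shares its weight
+# with `embed_tokens`, so the returned `last_hidden_state` is already projected from
+# (batch, tgt_len, d_model) to vocabulary scores of shape (batch, tgt_len, tgt_vocab_size),
+# which FSMTForConditionalGeneration uses directly as `lm_logits`.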
+
+def _reorder_buffer(attn_cache, new_order):
+ for k, input_buffer_k in attn_cache.items():
+ if input_buffer_k is not None:
+ attn_cache[k] = input_buffer_k.index_select(0, new_order)
+ return attn_cache
+
+
+class Attention(nn.Module):
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+ def __init__(
+ self,
+ embed_dim,
+ num_heads,
+ dropout=0.0,
+ bias=True,
+ encoder_decoder_attention=False, # otherwise self_attention
+ ):
+ super().__init__()
+ self.embed_dim = embed_dim
+ self.num_heads = num_heads
+ self.dropout = dropout
+ self.head_dim = embed_dim // num_heads
+ assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"
+ self.scaling = self.head_dim**-0.5
+
+ self.encoder_decoder_attention = encoder_decoder_attention
+ self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
+ self.cache_key = "encoder_decoder" if self.encoder_decoder_attention else "self"
+
+ def _shape(self, tensor, seq_len, bsz):
+ return tensor.contiguous().view(seq_len, bsz * self.num_heads, self.head_dim).transpose(0, 1)
+
+ def forward(
+ self,
+ query,
+ key: Optional[Tensor],
+ key_padding_mask: Optional[Tensor] = None,
+ layer_state: Optional[Dict[str, Optional[Tensor]]] = None,
+ attn_mask: Optional[Tensor] = None,
+ layer_head_mask: Optional[Tensor] = None,
+ output_attentions=False,
+ ) -> Tuple[Tensor, Optional[Tensor]]:
+ """Input shape: Time(SeqLen) x Batch x Channel"""
+ static_kv: bool = self.encoder_decoder_attention
+ tgt_len, bsz, embed_dim = query.size()
+ assert embed_dim == self.embed_dim
+ assert list(query.size()) == [tgt_len, bsz, embed_dim]
+ # get here for encoder decoder cause of static_kv
+ if layer_state is not None: # reuse k,v and encoder_padding_mask
+ saved_state = layer_state.get(self.cache_key, {})
+ if "prev_key" in saved_state and static_kv:
+ # previous time steps are cached - no need to recompute key and value if they are static
+ key = None
+ else:
+ saved_state = None
+ layer_state = {}
+
+ q = self.q_proj(query) * self.scaling
+ if static_kv:
+ if key is None:
+ k = v = None
+ else:
+ k = self.k_proj(key)
+ v = self.v_proj(key)
+ else:
+ k = self.k_proj(query)
+ v = self.v_proj(query)
+
+ q = self._shape(q, tgt_len, bsz)
+ if k is not None:
+ k = self._shape(k, -1, bsz)
+ if v is not None:
+ v = self._shape(v, -1, bsz)
+
+ if saved_state is not None:
+ k, v, key_padding_mask = self._use_saved_state(k, v, saved_state, key_padding_mask, static_kv, bsz)
+
+ # Update cache
+ layer_state[self.cache_key] = {
+ "prev_key": k.view(bsz, self.num_heads, -1, self.head_dim),
+ "prev_value": v.view(bsz, self.num_heads, -1, self.head_dim),
+ "prev_key_padding_mask": key_padding_mask if not static_kv else None,
+ }
+
+ assert k is not None
+ src_len = k.size(1)
+ attn_weights = torch.bmm(q, k.transpose(1, 2))
+ assert attn_weights.size() == (bsz * self.num_heads, tgt_len, src_len)
+
+ if attn_mask is not None:
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attn_mask
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ # This is part of a workaround to get around fork/join parallelism not supporting Optional types.
+ if key_padding_mask is not None and key_padding_mask.dim() == 0:
+ key_padding_mask = None
+ assert key_padding_mask is None or key_padding_mask.size()[:2] == (
+ bsz,
+ src_len,
+ )
+
+ if key_padding_mask is not None: # don't attend to padding symbols
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ reshaped = key_padding_mask.unsqueeze(1).unsqueeze(2)
+ attn_weights = attn_weights.masked_fill(reshaped, torch.finfo(attn_weights.dtype).min)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
+
+ if layer_head_mask is not None:
+ assert layer_head_mask.size() == (
+ self.num_heads,
+ ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
+ attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
+
+ if output_attentions:
+ # make sure that attn_weights are included in graph
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
+ else:
+ attn_weights_reshaped = None
+
+ attn_probs = nn.functional.dropout(
+ attn_weights,
+ p=self.dropout,
+ training=self.training,
+ )
+
+ assert v is not None
+ attn_output = torch.bmm(attn_probs, v)
+ assert attn_output.size() == (bsz * self.num_heads, tgt_len, self.head_dim)
+ attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
+ attn_output = self.out_proj(attn_output)
+
+ return attn_output, attn_weights_reshaped
+
+ def _use_saved_state(self, k, v, saved_state, key_padding_mask, static_kv, bsz):
+ # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)
+ if "prev_key" in saved_state:
+ _prev_key = saved_state["prev_key"]
+ assert _prev_key is not None
+ prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)
+ if static_kv:
+ k = prev_key
+ else:
+ assert k is not None
+ k = torch.cat([prev_key, k], dim=1)
+ if "prev_value" in saved_state:
+ _prev_value = saved_state["prev_value"]
+ assert _prev_value is not None
+ prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)
+ if static_kv:
+ v = prev_value
+ else:
+ assert v is not None
+ v = torch.cat([prev_value, v], dim=1)
+ assert k is not None and v is not None
+ prev_key_padding_mask: Optional[Tensor] = saved_state.get("prev_key_padding_mask", None)
+ if prev_key_padding_mask is not None:
+ if static_kv:
+ new_key_padding_mask = prev_key_padding_mask
+ else:
+ new_key_padding_mask = torch.cat([prev_key_padding_mask, key_padding_mask], dim=1)
+ else:
+ new_key_padding_mask = key_padding_mask
+ return k, v, new_key_padding_mask
+
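+# Cache layout used by Attention above (illustrative): during incremental decoding each decoder
+# layer keeps a dict per attention type, roughly
+#
+# layer_state["self"] = {"prev_key": (bsz, num_heads, seq_so_far, head_dim),
+# "prev_value": (bsz, num_heads, seq_so_far, head_dim),
+# "prev_key_padding_mask": ...}
+#
+# Self-attention concatenates the new step onto the cached keys/values, while cross-attention
+# ("encoder_decoder") is static and simply reuses the cached encoder projections.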
+
+def fill_with_neg_inf(t):
+ """FP16-compatible function that fills a input_ids with -inf."""
+ return t.float().fill_(torch.finfo(t.dtype).min).type_as(t)
+
+
+# Public API
+def _get_shape(t):
+ return getattr(t, "shape", None)
+
+
+@add_start_docstrings(
+ "The bare FSMT Model outputting raw hidden-states without any specific head on top.",
+ FSMT_START_DOCSTRING,
+)
+class FSMTModel(PretrainedFSMTModel):
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]
+
+ def __init__(self, config: FSMTConfig):
+ super().__init__(config)
+
+ padding_idx = config.pad_token_id
+ encoder_embed_tokens = nn.Embedding(config.src_vocab_size, config.d_model, padding_idx)
+ decoder_embed_tokens = nn.Embedding(config.tgt_vocab_size, config.d_model, padding_idx)
+
+ self.encoder = FSMTEncoder(config, encoder_embed_tokens)
+ self.decoder = FSMTDecoder(config, decoder_embed_tokens)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_encoder(self):
+ return self.encoder
+
+ def get_decoder(self):
+ return self.decoder
+
+ def _tie_weights(self):
+ if self.config.tie_word_embeddings:
+ self._tie_or_clone_weights(self.decoder.embed_tokens, self.get_input_embeddings())
+ self._tie_or_clone_weights(self.decoder.output_projection, self.get_input_embeddings())
+
+ @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=Seq2SeqModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqModelOutput]:
+ if decoder_input_ids is None:
+ use_cache = False
+
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # make masks if user doesn't supply
+ if not use_cache and input_ids is not None:
+ decoder_input_ids, decoder_padding_mask, causal_mask = _prepare_fsmt_decoder_inputs(
+ self.config,
+ input_ids,
+ decoder_input_ids=decoder_input_ids,
+ decoder_padding_mask=decoder_attention_mask,
+ causal_mask_dtype=self.decoder.embed_tokens.weight.dtype,
+ )
+ else:
+ decoder_padding_mask, causal_mask = None, None
+
+ if decoder_input_ids is None and decoder_inputs_embeds is None:
+ raise ValueError("Make sure that `decoder_input_ids` or `decoder_inputs_embeds` are passed.")
+
+ if encoder_outputs is None:
+ encoder_outputs = self.encoder(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=False
+ elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
+ encoder_outputs = BaseModelOutput(
+ last_hidden_state=encoder_outputs[0],
+ hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
+ attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
+ )
+
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
+ decoder_outputs = self.decoder(
+ decoder_input_ids,
+ encoder_outputs[0],
+ attention_mask,
+ decoder_padding_mask,
+ decoder_causal_mask=causal_mask,
+ inputs_embeds=decoder_inputs_embeds,
+ head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ if not return_dict:
+ return decoder_outputs + encoder_outputs
+
+ return Seq2SeqModelOutput(
+ last_hidden_state=decoder_outputs.last_hidden_state,
+ past_key_values=decoder_outputs.past_key_values,
+ decoder_hidden_states=decoder_outputs.hidden_states,
+ decoder_attentions=decoder_outputs.attentions,
+ cross_attentions=decoder_outputs.cross_attentions,
+ encoder_last_hidden_state=encoder_outputs.last_hidden_state,
+ encoder_hidden_states=encoder_outputs.hidden_states,
+ encoder_attentions=encoder_outputs.attentions,
+ )
+
+ def get_input_embeddings(self):
+ return self.encoder.embed_tokens
+
+ def set_input_embeddings(self, value):
+ self.encoder.embed_tokens = value
+
+ def get_output_embeddings(self):
+ return self.decoder.embed_tokens
+
+ def set_output_embeddings(self, value):
+ self.decoder.embed_tokens = value
+
+
+@add_start_docstrings(
+ "The FSMT Model with a language modeling head. Can be used for summarization.", FSMT_START_DOCSTRING
+)
+class FSMTForConditionalGeneration(PretrainedFSMTModel):
+ base_model_prefix = "model"
+ _tied_weights_keys = ["decoder.embed_tokens.weight", "decoder.output_projection.weight"]
+
+ def __init__(self, config: FSMTConfig):
+ super().__init__(config)
+ base_model = FSMTModel(config)
+ self.model = base_model
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(FSMT_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
+ @add_end_docstrings(FSMT_GENERATION_EXAMPLE)
+ def forward(
+ self,
+ input_ids: torch.LongTensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ decoder_input_ids: Optional[torch.LongTensor] = None,
+ decoder_attention_mask: Optional[torch.BoolTensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ decoder_head_mask: Optional[torch.Tensor] = None,
+ cross_attn_head_mask: Optional[torch.Tensor] = None,
+ encoder_outputs: Optional[Tuple[torch.FloatTensor]] = None,
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ decoder_inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], Seq2SeqLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
+
+ Returns:
+
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if labels is not None:
+ use_cache = False
+
+ outputs = self.model(
+ input_ids,
+ inputs_embeds=inputs_embeds,
+ attention_mask=attention_mask,
+ decoder_input_ids=decoder_input_ids,
+ decoder_inputs_embeds=decoder_inputs_embeds,
+ encoder_outputs=encoder_outputs,
+ decoder_attention_mask=decoder_attention_mask,
+ head_mask=head_mask,
+ decoder_head_mask=decoder_head_mask,
+ cross_attn_head_mask=cross_attn_head_mask,
+ past_key_values=past_key_values,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ lm_logits = outputs[0]
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ # TODO(SS): do we need to ignore pad tokens in labels?
+ masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.tgt_vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (lm_logits,) + outputs[1:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return Seq2SeqLMOutput(
+ loss=masked_lm_loss,
+ logits=lm_logits,
+ past_key_values=outputs.past_key_values,
+ decoder_hidden_states=outputs.decoder_hidden_states,
+ decoder_attentions=outputs.decoder_attentions,
+ cross_attentions=outputs.cross_attentions,
+ encoder_last_hidden_state=outputs.encoder_last_hidden_state,
+ encoder_hidden_states=outputs.encoder_hidden_states,
+ encoder_attentions=outputs.encoder_attentions,
+ )
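+
+ # Illustrative sketch (not part of the class): the loss above flattens the
+ # (batch, tgt_len, tgt_vocab_size) logits against (batch, tgt_len) labels, and label
+ # positions set to -100 are skipped via CrossEntropyLoss's default ignore_index.
+ # The sizes below are dummy values.
+ import torch
+ from torch.nn import CrossEntropyLoss
+
+ _logits = torch.randn(2, 4, 10)                                  # (batch, tgt_len, tgt_vocab_size)
+ _labels = torch.tensor([[1, 2, 3, -100], [4, 5, -100, -100]])    # -100 marks ignored positions
+ _loss = CrossEntropyLoss()(_logits.view(-1, 10), _labels.view(-1))
+ assert _loss.ndim == 0                                           # a single scalar over non-ignored tokens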
+
+ def prepare_inputs_for_generation(
+ self,
+ decoder_input_ids,
+ past_key_values=None,
+ attention_mask=None,
+ head_mask=None,
+ decoder_head_mask=None,
+ cross_attn_head_mask=None,
+ use_cache=None,
+ encoder_outputs=None,
+ **kwargs,
+ ):
+ return {
+ "input_ids": None, # encoder_outputs is defined. input_ids not needed
+ "encoder_outputs": encoder_outputs,
+ "past_key_values": past_key_values,
+ "decoder_input_ids": decoder_input_ids,
+ "attention_mask": attention_mask,
+ "head_mask": head_mask,
+ "decoder_head_mask": decoder_head_mask,
+ "cross_attn_head_mask": cross_attn_head_mask,
+ "use_cache": use_cache, # change this to avoid caching (presumably for debugging)
+ }
+
+ def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
+ return shift_tokens_right(labels, self.config.pad_token_id)
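+
+ # Illustrative sketch (not part of the library): the module-level `shift_tokens_right`
+ # used above is assumed to follow the older BART-style convention of wrapping the last
+ # non-pad token (usually EOS) to position 0 and shifting the rest one step right.
+ # `_shift_right_sketch` below is a hypothetical stand-in that mirrors that behaviour.
+ import torch
+
+ def _shift_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
+     prev = input_ids.clone()
+     eos_idx = (input_ids.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)  # last non-pad position
+     prev[:, 0] = input_ids.gather(1, eos_idx).squeeze(-1)                # wrap EOS to the front
+     prev[:, 1:] = input_ids[:, :-1]                                      # shift everything else right
+     return prev
+
+ # labels [5, 6, 7, 2(eos), 1(pad)] -> decoder inputs [2, 5, 6, 7, 2]
+ assert _shift_right_sketch(torch.tensor([[5, 6, 7, 2, 1]]), 1).tolist() == [[2, 5, 6, 7, 2]]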
+
+ @staticmethod
+ def _reorder_cache(past_key_values, beam_idx):
+ reordered_past = []
+ for layer_past in past_key_values:
+ # get the correct batch idx from decoder layer's batch dim for cross and self-attn
+ layer_past_new = {
+ attn_key: _reorder_buffer(attn_cache, beam_idx) for attn_key, attn_cache in layer_past.items()
+ }
+ reordered_past.append(layer_past_new)
+ return reordered_past
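+
+ # Illustrative sketch (not part of the class): each `attn_cache` above is assumed to map
+ # names like "prev_key"/"prev_value" to tensors whose first dimension is the batch/beam
+ # dimension, so reordering for beam search is an index_select along dim 0.
+ # The keys and shapes below are hypothetical.
+ import torch
+
+ _layer_past = {"self": {"prev_key": torch.randn(4, 8, 3, 16)}}   # (beams, heads, tgt_len, head_dim)
+ _beam_idx = torch.tensor([2, 2, 0, 1])                           # which beams survived this step
+ _reordered = {
+     attn_key: {k: v.index_select(0, _beam_idx) for k, v in cache.items()}
+     for attn_key, cache in _layer_past.items()
+ }
+ assert torch.equal(_reordered["self"]["prev_key"][0], _layer_past["self"]["prev_key"][2])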
+
+ def get_encoder(self):
+ return self.model.encoder
+
+ def get_decoder(self):
+ return self.model.decoder
+
+ def get_output_embeddings(self):
+ return self.model.decoder.embed_tokens
+
+ def set_output_embeddings(self, value):
+ self.model.decoder.embed_tokens = value
+
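+ # Hedged usage sketch, kept as a comment so nothing runs at import time. It assumes the
+ # `transformers` package is installed and the public "facebook/wmt19-en-ru" checkpoint can
+ # be downloaded; it only illustrates how the class above is typically driven via `generate`.
+ #
+ #   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
+ #
+ #   mname = "facebook/wmt19-en-ru"
+ #   tokenizer = FSMTTokenizer.from_pretrained(mname)
+ #   model = FSMTForConditionalGeneration.from_pretrained(mname)
+ #   batch = tokenizer("Machine learning is great, isn't it?", return_tensors="pt")
+ #   generated = model.generate(**batch, num_beams=5)
+ #   print(tokenizer.decode(generated[0], skip_special_tokens=True))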
+
+class SinusoidalPositionalEmbedding(nn.Embedding):
+ """
+ This module produces sinusoidal positional embeddings of any length.
+
+ We don't want to save the weight of this embedding since it's not trained (deterministic) and it can be huge.
+
+ Padding symbols are ignored.
+
+ These embeddings get automatically extended in forward if more positions are needed.
+ """
+
+ def __init__(self, num_positions, embedding_dim, padding_idx):
+ self.make_weight(num_positions, embedding_dim, padding_idx)
+
+ def make_weight(self, num_positions, embedding_dim, padding_idx):
+ weight = self.get_embedding(num_positions, embedding_dim, padding_idx)
+ if not hasattr(self, "weight"):
+ # in __init__
+ super().__init__(num_positions, embedding_dim, padding_idx, _weight=weight)
+ else:
+ # in forward put the weights on the correct dtype and device of the param
+ weight = weight.to(dtype=self.weight.dtype, device=self.weight.device)
+ self.weight = nn.Parameter(weight)
+ self.weight.detach_()
+ self.weight.requires_grad = False
+
+ @staticmethod
+ def get_embedding(num_embeddings, embedding_dim, padding_idx):
+ """
+ Build sinusoidal embeddings.
+
+ This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of
+ "Attention Is All You Need".
+ """
+ half_dim = embedding_dim // 2
+ emb = math.log(10000) / (half_dim - 1)
+ emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
+ emb = torch.arange(num_embeddings, dtype=torch.int64).float().unsqueeze(1) * emb.unsqueeze(0)
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
+ if embedding_dim % 2 == 1:
+ # zero pad
+ emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
+ if padding_idx is not None:
+ emb[padding_idx, :] = 0
+ return emb
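+
+ # Quick standalone check of the construction above (illustrative only): the first half of the
+ # feature dimension holds sines, the second half cosines, and the padding row is zeroed.
+ import math
+ import torch
+
+ _n, _dim, _pad = 10, 6, 1
+ _half = _dim // 2
+ _freq = torch.exp(torch.arange(_half).float() * -(math.log(10000) / (_half - 1)))
+ _angles = torch.arange(_n).float().unsqueeze(1) * _freq.unsqueeze(0)
+ _table = torch.cat([torch.sin(_angles), torch.cos(_angles)], dim=1)
+ _table[_pad, :] = 0
+ assert _table.shape == (_n, _dim) and torch.all(_table[_pad] == 0)
+ assert torch.allclose(_table[0, :_half], torch.zeros(_half))     # position 0: sin(0) = 0
+ assert torch.allclose(_table[0, _half:], torch.ones(_half))      # position 0: cos(0) = 1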
+
+ @staticmethod
+ def make_positions(tensor, padding_idx: int):
+ """
+ Replace non-padding symbols with their position numbers.
+
+ Position numbers begin at padding_idx+1. Padding symbols are ignored.
+ """
+ # The series of casts and type-conversions here are carefully
+ # balanced to both work with ONNX export and XLA. In particular XLA
+ # prefers ints, cumsum defaults to output longs, and ONNX doesn't know
+ # how to handle the dtype kwarg in cumsum.
+ mask = tensor.ne(padding_idx).int()
+ return (torch.cumsum(mask, dim=1).type_as(mask) * mask).long() + padding_idx
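+
+ # Worked example of the cumsum trick above (illustrative only): with padding_idx = 1,
+ # real tokens are numbered 2, 3, 4, ... while every pad position stays at padding_idx.
+ import torch
+
+ _ids = torch.tensor([[7, 7, 7, 1, 1]])           # 1 is the pad id here
+ _mask = _ids.ne(1).int()
+ _pos = (torch.cumsum(_mask, dim=1).type_as(_mask) * _mask).long() + 1
+ assert _pos.tolist() == [[2, 3, 4, 1, 1]]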
+
+ def forward(
+ self,
+ input,
+ incremental_state: Optional[Any] = None,
+ timestep: Optional[Tensor] = None,
+ ):
+ """Input is expected to be of size [bsz x seqlen]."""
+ bsz, seq_len = input.shape[:2]
+ max_pos = self.padding_idx + 1 + seq_len
+ if max_pos > self.weight.size(0):
+ # expand embeddings if needed
+ self.make_weight(max_pos, self.embedding_dim, self.padding_idx)
+ positions = self.make_positions(input, self.padding_idx)
+ return super().forward(positions)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py b/venv/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b0be1f8be24987259aaee01d3165aa03c9218a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/fsmt/tokenization_fsmt.py
@@ -0,0 +1,519 @@
+# coding=utf-8
+# Copyright 2019 The Open AI Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for FSMT."""
+
+
+import json
+import os
+import re
+import unicodedata
+from typing import Dict, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {
+ "src_vocab_file": "vocab-src.json",
+ "tgt_vocab_file": "vocab-tgt.json",
+ "merges_file": "merges.txt",
+}
+
+
+def get_pairs(word):
+ """
+ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length
+ strings)
+ """
+ pairs = set()
+ prev_char = word[0]
+ for char in word[1:]:
+ pairs.add((prev_char, char))
+ prev_char = char
+ return pairs
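+
+ # Worked example (illustrative only): pairs for a word already split into BPE symbols.
+ assert get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}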
+
+
+def replace_unicode_punct(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/replace-unicode-punctuation.perl
+ """
+ text = text.replace("，", ",")
+ text = re.sub(r"。\s*", ". ", text)
+ text = text.replace("、", ",")
+ text = text.replace("”", '"')
+ text = text.replace("“", '"')
+ text = text.replace("∶", ":")
+ text = text.replace("：", ":")
+ text = text.replace("？", "?")
+ text = text.replace("《", '"')
+ text = text.replace("》", '"')
+ text = text.replace("）", ")")
+ text = text.replace("！", "!")
+ text = text.replace("（", "(")
+ text = text.replace("；", ";")
+ text = text.replace("１", "1")
+ text = text.replace("」", '"')
+ text = text.replace("「", '"')
+ text = text.replace("０", "0")
+ text = text.replace("３", "3")
+ text = text.replace("２", "2")
+ text = text.replace("５", "5")
+ text = text.replace("６", "6")
+ text = text.replace("９", "9")
+ text = text.replace("７", "7")
+ text = text.replace("８", "8")
+ text = text.replace("４", "4")
+ text = re.sub(r"．\s*", ". ", text)
+ text = text.replace("～", "~")
+ text = text.replace("’", "'")
+ text = text.replace("…", "...")
+ text = text.replace("━", "-")
+ text = text.replace("〈", "<")
+ text = text.replace("〉", ">")
+ text = text.replace("【", "[")
+ text = text.replace("】", "]")
+ text = text.replace("％", "%")
+ return text
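+
+ # Illustrative check (relies only on the mappings listed above): full-width digits and
+ # punctuation are mapped to their ASCII counterparts, other characters pass through unchanged.
+ assert replace_unicode_punct("１００％ 确定？") == "100% 确定?"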
+
+
+def remove_non_printing_char(text):
+ """
+ Port of https://github.com/moses-smt/mosesdecoder/blob/master/scripts/tokenizer/remove-non-printing-char.perl
+ """
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat.startswith("C"):
+ continue
+ output.append(char)
+ return "".join(output)
+
+
+# Porting notes:
+# this one is modeled after XLMTokenizer
+#
+# added:
+# - src_vocab_file,
+# - tgt_vocab_file,
+# - langs,
+
+
+class FSMTTokenizer(PreTrainedTokenizer):
+ """
+ Construct a FAIRSEQ Transformer tokenizer. Based on Byte-Pair Encoding. The tokenization process is the following:
+
+ - Moses preprocessing and tokenization.
+ - Normalizing all input text.
+ - The arguments `special_tokens` and the function `set_special_tokens` can be used to add additional symbols (like
+ "__classify__") to a vocabulary.
+ - The argument `langs` defines a pair of languages.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
+ this superclass for more information regarding those methods.
+
+ Args:
+ langs (`List[str]`, *optional*):
+ A list of two languages to translate from and to, for instance `["en", "ru"]`.
+ src_vocab_file (`str`, *optional*):
+ File containing the vocabulary for the source language.
+ tgt_vocab_file (`str`, *optional*):
+ File containing the vocabulary for the target language.
+ merges_file (`str`, *optional*):
+ File containing the merges.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether or not to lowercase the input when tokenizing.
+ unk_token (`str`, *optional*, defaults to `"<unk>"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ langs=None,
+ src_vocab_file=None,
+ tgt_vocab_file=None,
+ merges_file=None,
+ do_lower_case=False,
+ unk_token="",
+ bos_token="",
+ sep_token="",
+ pad_token="",
+ **kwargs,
+ ):
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use XLMTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
+
+ self.src_vocab_file = src_vocab_file
+ self.tgt_vocab_file = tgt_vocab_file
+ self.merges_file = merges_file
+ self.do_lower_case = do_lower_case
+
+ # cache of sm.MosesPunctNormalizer instance
+ self.cache_moses_punct_normalizer = {}
+ # cache of sm.MosesTokenizer instance
+ self.cache_moses_tokenizer = {}
+ self.cache_moses_detokenizer = {}
+
+ if langs and len(langs) == 2:
+ self.src_lang, self.tgt_lang = langs
+ else:
+ raise ValueError(
+ f"arg `langs` needs to be a list of 2 langs, e.g. ['en', 'ru'], but got {langs}. "
+ "Usually that means that tokenizer can't find a mapping for the given model path "
+ "in PRETRAINED_VOCAB_FILES_MAP, and other maps of this tokenizer."
+ )
+
+ with open(src_vocab_file, encoding="utf-8") as src_vocab_handle:
+ self.encoder = json.load(src_vocab_handle)
+ with open(tgt_vocab_file, encoding="utf-8") as tgt_vocab_handle:
+ tgt_vocab = json.load(tgt_vocab_handle)
+ self.decoder = {v: k for k, v in tgt_vocab.items()}
+ with open(merges_file, encoding="utf-8") as merges_handle:
+ merges = merges_handle.read().split("\n")[:-1]
+ merges = [tuple(merge.split()[:2]) for merge in merges]
+ self.bpe_ranks = dict(zip(merges, range(len(merges))))
+ self.cache = {}
+ super().__init__(
+ langs=langs,
+ src_vocab_file=src_vocab_file,
+ tgt_vocab_file=tgt_vocab_file,
+ merges_file=merges_file,
+ do_lower_case=do_lower_case,
+ unk_token=unk_token,
+ bos_token=bos_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ **kwargs,
+ )
+
+ # hack override
+ def get_vocab(self) -> Dict[str, int]:
+ return self.get_src_vocab()
+
+ # hack override
+ @property
+ def vocab_size(self) -> int:
+ return self.src_vocab_size
+
+ def moses_punct_norm(self, text, lang):
+ if lang not in self.cache_moses_punct_normalizer:
+ punct_normalizer = self.sm.MosesPunctNormalizer(lang=lang)
+ self.cache_moses_punct_normalizer[lang] = punct_normalizer
+ return self.cache_moses_punct_normalizer[lang].normalize(text)
+
+ def moses_tokenize(self, text, lang):
+ if lang not in self.cache_moses_tokenizer:
+ moses_tokenizer = self.sm.MosesTokenizer(lang=lang)
+ self.cache_moses_tokenizer[lang] = moses_tokenizer
+ return self.cache_moses_tokenizer[lang].tokenize(
+ text, aggressive_dash_splits=True, return_str=False, escape=True
+ )
+
+ def moses_detokenize(self, tokens, lang):
+ if lang not in self.cache_moses_detokenizer:
+ moses_detokenizer = self.sm.MosesDetokenizer(lang=lang)
+ self.cache_moses_detokenizer[lang] = moses_detokenizer
+ return self.cache_moses_detokenizer[lang].detokenize(tokens)
+
+ def moses_pipeline(self, text, lang):
+ text = replace_unicode_punct(text)
+ text = self.moses_punct_norm(text, lang)
+ text = remove_non_printing_char(text)
+ return text
+
+ @property
+ def src_vocab_size(self):
+ return len(self.encoder)
+
+ @property
+ def tgt_vocab_size(self):
+ return len(self.decoder)
+
+ def get_src_vocab(self):
+ return dict(self.encoder, **self.added_tokens_encoder)
+
+ def get_tgt_vocab(self):
+ return dict(self.decoder, **self.added_tokens_decoder)
+
+ def bpe(self, token):
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
+ if token in self.cache:
+ return self.cache[token]
+ pairs = get_pairs(word)
+
+ if not pairs:
+ return token + ""
+
+ while True:
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
+ if bigram not in self.bpe_ranks:
+ break
+ first, second = bigram
+ new_word = []
+ i = 0
+ while i < len(word):
+ try:
+ j = word.index(first, i)
+ except ValueError:
+ new_word.extend(word[i:])
+ break
+ else:
+ new_word.extend(word[i:j])
+ i = j
+
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
+ new_word.append(first + second)
+ i += 2
+ else:
+ new_word.append(word[i])
+ i += 1
+ new_word = tuple(new_word)
+ word = new_word
+ if len(word) == 1:
+ break
+ else:
+ pairs = get_pairs(word)
+ word = " ".join(word)
+ if word == "\n  </w>":
+ word = "\n</w>"
+ self.cache[token] = word
+ return word
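+
+ # Illustrative sketch (not part of the class): how `bpe_ranks` drives the merge loop above.
+ # The ranks here are hypothetical; real ranks come from merges.txt.
+ _toy_ranks = {("l", "o"): 0, ("lo", "w</w>"): 1}
+ _word = ("l", "o", "w</w>")                              # "low" plus the end-of-word marker
+ while True:
+     _pairs = {(_word[i], _word[i + 1]) for i in range(len(_word) - 1)}
+     _known = [p for p in _pairs if p in _toy_ranks]
+     if not _known:
+         break
+     _first, _second = min(_known, key=_toy_ranks.get)    # lowest-ranked known pair merges first
+     _merged, _i = [], 0
+     while _i < len(_word):
+         if _i < len(_word) - 1 and (_word[_i], _word[_i + 1]) == (_first, _second):
+             _merged.append(_first + _second)
+             _i += 2
+         else:
+             _merged.append(_word[_i])
+             _i += 1
+     _word = tuple(_merged)
+ assert " ".join(_word) == "low</w>"                      # fully merged into a single symbol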
+
+ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
+ """
+ Tokenize a string given language code using Moses.
+
+ Details of tokenization:
+
+ - [sacremoses](https://github.com/alvations/sacremoses): port of Moses
+ - Install with `pip install sacremoses`
+
+ Args:
+ - lang: ISO language code (default = 'en') (string). Languages should be among the model's supported
+ languages. However, we don't enforce it.
+ - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
+ (bool). If True, we only apply BPE.
+
+ Returns:
+ List of tokens.
+ """
+ # ignore `lang`, which currently isn't explicitly passed in tokenization_utils.py and always results in lang=en
+ # if lang != self.src_lang:
+ # raise ValueError(f"Expected lang={self.src_lang}, but got {lang}")
+ lang = self.src_lang
+
+ if self.do_lower_case:
+ text = text.lower()
+
+ if bypass_tokenizer:
+ text = text.split()
+ else:
+ text = self.moses_pipeline(text, lang=lang)
+ text = self.moses_tokenize(text, lang=lang)
+
+ split_tokens = []
+ for token in text:
+ if token:
+ split_tokens.extend(list(self.bpe(token).split(" ")))
+
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.decoder.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+
+ # remove BPE
+ tokens = [t.replace(" ", "").replace("</w>", " ") for t in tokens]
+ tokens = "".join(tokens).split()
+ # detokenize
+ text = self.moses_detokenize(tokens, self.tgt_lang)
+ return text
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating
+ and adding special tokens. A FAIRSEQ Transformer sequence has the following format:
+
+ - single sequence: `X </s>`
+ - pair of sequences: `A </s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ sep = [self.sep_token_id]
+
+ # no bos used in fairseq
+ if token_ids_1 is None:
+ return token_ids_0 + sep
+ return token_ids_0 + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+ # no bos used in fairseq
+ if token_ids_1 is not None:
+ return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return ([0] * len(token_ids_0)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A FAIRSEQ
+ Transformer sequence pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+
+ # no bos used in fairseq
+ if token_ids_1 is None:
+ return len(token_ids_0 + sep) * [0]
+ return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
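+
+ # Worked example for the three helpers above (illustrative; the ids are hypothetical and 2
+ # stands in for the </s> id). The lists mirror what the methods return for a pair of inputs.
+ _ids0, _ids1, _sep = [5, 6], [7, 8], [2]
+ assert _ids0 + _sep + _ids1 + _sep == [5, 6, 2, 7, 8, 2]                        # build_inputs_with_special_tokens
+ assert [0] * len(_ids0) + [1] + [0] * len(_ids1) + [1] == [0, 0, 1, 0, 0, 1]    # get_special_tokens_mask
+ assert len(_ids0 + _sep) * [0] + len(_ids1 + _sep) * [1] == [0, 0, 0, 1, 1, 1]  # create_token_type_ids_from_sequences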
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if not os.path.isdir(save_directory):
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
+ return
+
+ src_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["src_vocab_file"]
+ )
+ tgt_vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["tgt_vocab_file"]
+ )
+ merges_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
+ )
+
+ with open(src_vocab_file, "w", encoding="utf-8") as f:
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ with open(tgt_vocab_file, "w", encoding="utf-8") as f:
+ tgt_vocab = {v: k for k, v in self.decoder.items()}
+ f.write(json.dumps(tgt_vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
+
+ index = 0
+ with open(merges_file, "w", encoding="utf-8") as writer:
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
+ " Please check that the tokenizer is not corrupted!"
+ )
+ index = token_index
+ writer.write(" ".join(bpe_tokens) + "\n")
+ index += 1
+
+ return src_vocab_file, tgt_vocab_file, merges_file
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ state["sm"] = None
+ return state
+
+ def __setstate__(self, d):
+ self.__dict__ = d
+
+ try:
+ import sacremoses
+ except ImportError:
+ raise ImportError(
+ "You need to install sacremoses to use XLMTokenizer. "
+ "See https://pypi.org/project/sacremoses/ for installation."
+ )
+
+ self.sm = sacremoses
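+
+ # Hedged usage sketch, kept as a comment so nothing runs at import time. It assumes the
+ # `transformers` and `sacremoses` packages are installed and that the public
+ # "facebook/wmt19-en-ru" checkpoint (vocab-src.json, vocab-tgt.json, merges.txt) can be downloaded.
+ #
+ #   from transformers import FSMTTokenizer
+ #
+ #   tok = FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
+ #   print(tok.tokenize("Machine learning is great"))              # Moses + BPE pieces, most ending in "</w>"
+ #   enc = tok("Machine learning is great", return_tensors="pt")   # source-vocab ids, ending with </s>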