diff --git a/.gitattributes b/.gitattributes
index 842ec444dfb9c819ac92288398382e96174b7523..ab4e34313e02b26adfa1b38739275f5ba0bab39d 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -72,3 +72,7 @@ venv/lib/python3.10/site-packages/pydantic_core/_pydantic_core.cpython-310-x86_6
venv/lib/python3.10/site-packages/zstandard/_cffi.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/yaml/_yaml.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs diff=lfs merge=lfs -text
diff --git a/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a7d09bc5342f1bb17eecd22d6d73935384fe46ed
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0e03a35ada3cb32db3a50c91fa987c020e7cc3cd40d29299cd451c2d07654b09
+size 33555612
diff --git a/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..701fc2ff4c08680ecf1472dfaba3cca906cba4ab
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e69f123a14465a13101b5e6c579c7333c5b44c1505ada2b19b8408c84c9d1bcc
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5179510e013ef9c586f11b55074e46de61c7884f
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/17.mlp.dense_h_to_4h_swiglu.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:459f609bbc9bd2e507784de5dea7fde30e6227ba8c994af991478d78c9e4332d
+size 33555533
diff --git a/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..1b5a8e17608b2024ddbbc1a3855ccbeec35eb732
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/21.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a20bc4a005fdf87fa7206b9f6fc7a22ac2726348813efeeb490f96e3b6a8deb6
+size 33555627
diff --git a/ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/exp_avg.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6c9db51f80dad9239b3ed841f91f4c4ed7d79eae
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/exp_avg.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6b6e3741eee58ba1e686f474f5b75272d6c0f142250242f97c037b984977eecf
+size 9372
diff --git a/ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/fp32.pt b/ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/fp32.pt
new file mode 100644
index 0000000000000000000000000000000000000000..781a705de88da197448088d4c7f33a8a03e75b80
--- /dev/null
+++ b/ckpts/universal/global_step40/zero/3.post_attention_layernorm.weight/fp32.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:55baea1be4d87c71e5b29923772c5262815e5a7e3a187b3b741af9af0b9264b1
+size 9293
diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_1dc-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arithmetic_1dc-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..01756b4d47703cc943f7721509af1ead77739d1e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/arithmetic_1dc-v0-loglikelihood
@@ -0,0 +1 @@
+04c3a63a6b3c579bd3775d92b3076ba9130041d5ce7cf9244d3f86e95c804387
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_verbs-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_verbs-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..94d73d41da2f66060d05319caa8641493c7f8fc9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_irregular_past_participle_verbs-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_irregular_past_participle_verbs": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_irregular_past_participle_verbs": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..057af2db85481de8a2e64488c35d48dbf3061ad7
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_left_branch_island_simple_question-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_left_branch_island_simple_question": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_left_branch_island_simple_question": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..82fbbab07d39f44d560d77f2f93535846b413e8e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_only_npi_scope": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_scope": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..183b815d22d6227785479681934c05726dc912b9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_passive_1-v0-loglikelihood
@@ -0,0 +1 @@
+fa4addddd8e380031b8e0871776cabcb707c0f21dcaf5d8b3defec66cce55043
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..de9e8007180f265cb7b2aed51e277b93fded9ce6
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-res.json
@@ -0,0 +1 @@
+{"results": {"blimp_wh_vs_that_no_gap_long_distance": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_no_gap_long_distance": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_gender-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_english_gender-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..c24fb9dd6dfb9c494474fc08011d6d86ef18f5ef
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_gender-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_gender": {"likelihood_difference": 0.3361377482385407, "likelihood_difference_stderr": 0.012853081126751691, "pct_stereotype": 0.478125, "pct_stereotype_stderr": 0.027967820983765136}}, "versions": {"crows_pairs_english_gender": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_race_color-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_race_color-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..16127e96ad2c7051d8daf0bc0ad5114a6a07eb69
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_race_color-v0-loglikelihood
@@ -0,0 +1 @@
+6f9119026abff33c5c882d6172e092e806a8b21bd86864022978b1961839350f
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_race_color-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_french_race_color-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..bdb9d9c6aff73eac1def51836e15733ad940835c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_race_color-v0-res.json
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french_race_color": {"likelihood_difference": 0.33233909422443764, "likelihood_difference_stderr": 0.010623405969915857, "pct_stereotype": 0.4782608695652174, "pct_stereotype_stderr": 0.023315932363473738}}, "versions": {"crows_pairs_french_race_color": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_sexual_orientation-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_sexual_orientation-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..0336c1ddc64ef089490495a817922f3e7c9bdc73
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_sexual_orientation-v0-loglikelihood
@@ -0,0 +1 @@
+2ce823fdb93d325aa8fb40db5d335b093b4b69792763532d940a752440ee3a76
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/ethics_virtue-v0-res.json b/lm-evaluation-harness/tests/testdata/ethics_virtue-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..cf3e02d82662bc1c4de5f1cf3dd9442b321de623
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/ethics_virtue-v0-res.json
@@ -0,0 +1 @@
+{"results": {"ethics_virtue": {"acc": 0.5035175879396985, "acc_stderr": 0.0070893491553555765, "em": 0.036180904522613064}}, "versions": {"ethics_virtue": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_mathematics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_mathematics-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..a840b6b6420053c343787f08d8d723ab5ba5c1d3
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_mathematics-v0-loglikelihood
@@ -0,0 +1 @@
+e9fe80752686527281f834d2397875b4580581434b94799f9de6aaa450bd73ff
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..9c9e72efdf98ed9afb4881647929246433e1f857
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-electrical_engineering-v0-loglikelihood
@@ -0,0 +1 @@
+b9b5d8b8bb02696302ec6bc2a99bf987a5504d3bae0e529d2c8f263538c97518
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..1e2c01e2b19082144373a13ee25e3e68bf8df588
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-loglikelihood
@@ -0,0 +1 @@
+d4dc051f37a49dc75c218741e87bc826fd44f31ee1309b55e0f33bd191c1bc78
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..c5cf5cb467d80051cea569ab30ccc20d697e1e57
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-res.json
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-logical_fallacies": {"acc": 0.20245398773006135, "acc_norm": 0.2147239263803681, "acc_norm_stderr": 0.03226219377286774, "acc_stderr": 0.03157065078911902}}, "versions": {"hendrycksTest-logical_fallacies": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_scenarios-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_scenarios-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..d5ea0d8156ae4efaa0f7568ae8fd3a8ed3992d37
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_scenarios-v0-loglikelihood
@@ -0,0 +1 @@
+a8e1882e77728b53c8b86312254d08320d8363fb606d746a8dd145b812f62cf5
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_cloze-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_cloze-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3f3f931ac7e066cbab7b6ff68732360c764324f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_cloze-v0-res.json
@@ -0,0 +1 @@
+{"results": {"lambada_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_cloze": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..688e67a5534f801d5b256905a0d05a60c0adf8fc
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-loglikelihood
@@ -0,0 +1 @@
+9ca5643bbaafed2f027eab5b68cc438e9e268f6df9a678e956e61726a985cf0b
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_it-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_it-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..ca3fd80298aa1c565c978b26e992ccd42c7144f6
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_it-v0-loglikelihood
@@ -0,0 +1 @@
+fd87c6c5cf4e0499c5f9f80e5bd7ee6a4f3d2991902a0cc3ec9e6eaf22d6760a
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/lambada_standard-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_standard-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..fcbd56f50425ca6e143ccc0dd88458c051b63fb2
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/lambada_standard-v0-loglikelihood
@@ -0,0 +1 @@
+8958d9f8d8145046b692fadd8a9cc9c8bad5617c10774280cf7c24c21d2be160
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_algebra-v0-greedy_until b/lm-evaluation-harness/tests/testdata/math_algebra-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..ce881a0232cff3f1025b746184ce8a0170e34303
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_algebra-v0-greedy_until
@@ -0,0 +1 @@
+f19182ce697a2c095d9e5b56ee6659dc38c93994b69ca75d7c3d3f5fd87572b4
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_algebra-v1-greedy_until b/lm-evaluation-harness/tests/testdata/math_algebra-v1-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..ce881a0232cff3f1025b746184ce8a0170e34303
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_algebra-v1-greedy_until
@@ -0,0 +1 @@
+f19182ce697a2c095d9e5b56ee6659dc38c93994b69ca75d7c3d3f5fd87572b4
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-res.json b/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..7a195d9ac43e6feb4a7fc354f5dc424a27b0bf7d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-res.json
@@ -0,0 +1 @@
+{"results": {"math_intermediate_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_intermediate_algebra": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_arxiv-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_arxiv-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..3aa1d8c7349449271fbd81fbbc06fde47a116028
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_arxiv-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+814f9954e44368559602c00f7e85fa3971acdfd0315f508ec7df6318a79c55ec
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_arxiv-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_arxiv-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..05cbab38732c94665750aac31cd2c41688552a8d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_arxiv-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_arxiv": {"bits_per_byte": 1.55095665856779e-05, "byte_perplexity": 1.0000107504701365, "word_perplexity": 1.0000819333090385}}, "versions": {"pile_arxiv": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..967c14934b81e0880063c4239593fb74cd99cd8d
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v1-res.json
@@ -0,0 +1 @@
+{"results": {"pile_bookcorpus2": {"bits_per_byte": 1.6780040419457868e-06, "byte_perplexity": 1.000001163104447, "word_perplexity": 1.0000066499426599}}, "versions": {"pile_bookcorpus2": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_europarl-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_europarl-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..80272607557f6e0c97220efa30c8b9ad38f52aa8
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_europarl-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+e67d3dbccd47d308bfc5b0e66b76d0dfc5e386ebfa94e056562c2281c395543f
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_hackernews-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_hackernews-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..68578fe4c952b8bccb26700be82df67450c558dd
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_hackernews-v0-res.json
@@ -0,0 +1 @@
+{"results": {"pile_hackernews": {"bits_per_byte": 0.00010170276359193358, "byte_perplexity": 1.0001017079354932, "word_perplexity": 1.0006273924348839}}, "versions": {"pile_hackernews": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_philpapers-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_philpapers-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..4fbbc241ba9487c2513cdf46dbb76e004e401418
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_philpapers-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+339ba5d8c044c4a3ff9b9a8eaa24da1d6c01b72972074eb671a7da049eeb7047
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_pile-cc-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_pile-cc-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..d5369ed3c97838d67c2900cfac4aaeb5881ec884
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_pile-cc-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+731fdef4a43949b179ba0c540148ebc2fa41583dd583ef580dd812076c66a451
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling
new file mode 100644
index 0000000000000000000000000000000000000000..81c2e5ed06321b250a08a4232b3720ea5b650156
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-loglikelihood_rolling
@@ -0,0 +1 @@
+68263c52adc0086011e2220b619983935cabb1cc1f5f9f8ee1a74ab2a7457967
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/qa4mre_2011-v0-res.json b/lm-evaluation-harness/tests/testdata/qa4mre_2011-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..601c4eb763d97500cfcd4e24ca6602986c49939c
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/qa4mre_2011-v0-res.json
@@ -0,0 +1 @@
+{"results": {"qa4mre_2011": {"acc": 0.225, "acc_norm": 0.23333333333333334, "acc_norm_stderr": 0.03877199986918664, "acc_stderr": 0.0382797091741014}}, "versions": {"qa4mre_2011": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/sst-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/sst-v0-loglikelihood
new file mode 100644
index 0000000000000000000000000000000000000000..52050de16b54b432bdd68fae780660a035b10c0a
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/sst-v0-loglikelihood
@@ -0,0 +1 @@
+d2ebe3a63517d1d481aa1513bebe124c57a0904554a1e95f566979cfe67b1a7f
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..babb8d2d74fb5585cf9578f8b1dc8be3dde43f63
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt16-en-ro-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt16-en-ro": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.012004814364156886, "chrf_stderr": 6.424423961332661e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt16-en-ro": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..70c80afe5bd10baabdcb507faa385db124c1f42e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-cs-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.006212086270964023, "chrf_stderr": 0.0001119165191795531, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-cs-en": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..c4078efd996d010eac102fe23de50fdbbe0310d9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-greedy_until
@@ -0,0 +1 @@
+b6e9c305766ea23ce1027309f83c6d4c2ce8948d70b63a7858586ca34050d7fb
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-iu-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-iu-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..22f042eb4eba6e6e662e46807232679782f7b6b9
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-iu-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-en-iu": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.00011803644548940443, "chrf_stderr": 2.175287038623409e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-iu": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..bd431d61c479beb686d39be21905fdb0beb7781e
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-greedy_until
@@ -0,0 +1 @@
+952f02575d4936d93c4d2808d86c4bf5f1f3a0901212acee6cbc1f9cbd30d39e
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..77b600c49afa12cf988280e337a9d4747195f95f
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-ps-v0-greedy_until
@@ -0,0 +1 @@
+8411c2cb73114cbd0c6e0f17eab2625d486cc3a601105deb0ea1338a401df689
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..24db35e62fd176b0454ff426ab749787da805897
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-en-zh": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.00014170297316825535, "chrf_stderr": 6.590669847391838e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-zh": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a7a56daf0e793acf229c9b16a751383473bd5e26
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-en-zh-v1-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-en-zh": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.00014170297316825535, "chrf_stderr": 6.590669847391838e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-zh": 1}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-fr-de-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-fr-de-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..d5d06a02a30635ad57907b32ae66ccb9ba5a7e23
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-fr-de-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-fr-de": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.01143193767396364, "chrf_stderr": 0.00012555271954563658, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-fr-de": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-res.json
new file mode 100644
index 0000000000000000000000000000000000000000..a2f5cb31be388a5ca081567e72d16e42774aa008
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-pl-en-v0-res.json
@@ -0,0 +1 @@
+{"results": {"wmt20-pl-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.01353367757716276, "chrf_stderr": 0.00018386199249976465, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-pl-en": 0}}
\ No newline at end of file
diff --git a/lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-greedy_until
new file mode 100644
index 0000000000000000000000000000000000000000..7776c5952383a6254943869dad8fddb50e50e987
--- /dev/null
+++ b/lm-evaluation-harness/tests/testdata/wmt20-ps-en-v0-greedy_until
@@ -0,0 +1 @@
+c3976465e3709b4bc371175cc1494c69fe096ea4ba7d114da779d2baa0a47466
\ No newline at end of file
diff --git a/venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..3143dc357e3d2c77e3f89f20f4d0d0f4c21a2ab5
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cd9d3483be19a3c055f59bca0f17b5eca444ce6f141f27565d1f9ee031beffa8
+size 5341640
diff --git a/venv/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..023c4516cfa5f05fa06ead733714f72ee9b830fc
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/lxml/objectify.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c85d194503e7816b08e4fca94004af158e3e7b0737ba0647a301cacbd6e92886
+size 3057096
diff --git a/venv/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..fd204ab3cf9ff61792745f6e204dcb8138d55e58
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/_dataset.cpython-310-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4473d519e5ef3c96f31cd8aa9772525c46a87d70d9a922e6dfdb5726abaa9b77
+size 1083224
diff --git a/venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 b/venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600
new file mode 100644
index 0000000000000000000000000000000000000000..bfee234d91ad9698d2baa37e8ae7ea80adf75434
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0dd9b749bc026ebf236eb60571ba1355266a986210892bb8aaf2a6beb8146bd8
+size 10932648
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a569c3cc54bff82307d995f8bec52b9710279765
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__init__.py
@@ -0,0 +1,29 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import _LazyModule
+
+
+_import_structure = {"tokenization_bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"]}
+
+
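+# At runtime the `else` branch below replaces this module in `sys.modules` with a `_LazyModule`,
+# so the heavy tokenization code is only imported on first attribute access; the imports under
+# `TYPE_CHECKING` exist solely so static type checkers can resolve the exported names.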
+if TYPE_CHECKING:
+ from .tokenization_bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c646f0c980365703fbb30773e52eb8235f5b4540
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bdb0b9027ef818c347671940cd7bc7287d064dd7
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/__pycache__/tokenization_bert_japanese.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe5cd06f7f5854a78757c1297f7fc9ea8ae3500c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/bert_japanese/tokenization_bert_japanese.py
@@ -0,0 +1,980 @@
+# coding=utf-8
+# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes."""
+
+
+import collections
+import copy
+import os
+import unicodedata
+from typing import Any, Dict, List, Optional, Tuple
+
+from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import is_sentencepiece_available, is_sudachi_projection_available, logging
+
+
+if is_sentencepiece_available():
+ import sentencepiece as spm
+else:
+ spm = None
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "spm_file": "spiece.model"}
+
+SPIECE_UNDERLINE = "▁"
+
+
+# Copied from transformers.models.bert.tokenization_bert.load_vocab
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class BertJapaneseTokenizer(PreTrainedTokenizer):
+ r"""
+ Construct a BERT tokenizer for Japanese text.
+
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer
+ to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to a one-wordpiece-per-line vocabulary file.
+ spm_file (`str`, *optional*):
+ Path to [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .spm or .model
+ extension) that contains the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `False`):
+ Whether to lower case the input. Only has an effect when `do_word_tokenize=True`.
+ do_word_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether to do word tokenization.
+ do_subword_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether to do subword tokenization.
+ word_tokenizer_type (`str`, *optional*, defaults to `"basic"`):
+ Type of word tokenizer. Choose from ["basic", "mecab", "sudachi", "jumanpp"].
+ subword_tokenizer_type (`str`, *optional*, defaults to `"wordpiece"`):
+ Type of subword tokenizer. Choose from ["wordpiece", "character", "sentencepiece"].
+ mecab_kwargs (`dict`, *optional*):
+ Dictionary passed to the `MecabTokenizer` constructor.
+ sudachi_kwargs (`dict`, *optional*):
+ Dictionary passed to the `SudachiTokenizer` constructor.
+ jumanpp_kwargs (`dict`, *optional*):
+ Dictionary passed to the `JumanppTokenizer` constructor.
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+
+ def __init__(
+ self,
+ vocab_file,
+ spm_file=None,
+ do_lower_case=False,
+ do_word_tokenize=True,
+ do_subword_tokenize=True,
+ word_tokenizer_type="basic",
+ subword_tokenizer_type="wordpiece",
+ never_split=None,
+ unk_token="[UNK]",
+ sep_token="[SEP]",
+ pad_token="[PAD]",
+ cls_token="[CLS]",
+ mask_token="[MASK]",
+ mecab_kwargs=None,
+ sudachi_kwargs=None,
+ jumanpp_kwargs=None,
+ **kwargs,
+ ):
+ if subword_tokenizer_type == "sentencepiece":
+ if not os.path.isfile(spm_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{spm_file}'. To load the vocabulary from a Google"
+ " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.spm_file = spm_file
+ else:
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google"
+ " pretrained model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+
+ self.do_word_tokenize = do_word_tokenize
+ self.word_tokenizer_type = word_tokenizer_type
+ self.lower_case = do_lower_case
+ self.never_split = never_split
+ self.mecab_kwargs = copy.deepcopy(mecab_kwargs)
+ self.sudachi_kwargs = copy.deepcopy(sudachi_kwargs)
+ self.jumanpp_kwargs = copy.deepcopy(jumanpp_kwargs)
+ if do_word_tokenize:
+ if word_tokenizer_type == "basic":
+ self.word_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, tokenize_chinese_chars=False
+ )
+ elif word_tokenizer_type == "mecab":
+ self.word_tokenizer = MecabTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, **(mecab_kwargs or {})
+ )
+ elif word_tokenizer_type == "sudachi":
+ self.word_tokenizer = SudachiTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, **(sudachi_kwargs or {})
+ )
+ elif word_tokenizer_type == "jumanpp":
+ self.word_tokenizer = JumanppTokenizer(
+ do_lower_case=do_lower_case, never_split=never_split, **(jumanpp_kwargs or {})
+ )
+ else:
+ raise ValueError(f"Invalid word_tokenizer_type '{word_tokenizer_type}' is specified.")
+
+ self.do_subword_tokenize = do_subword_tokenize
+ self.subword_tokenizer_type = subword_tokenizer_type
+ if do_subword_tokenize:
+ if subword_tokenizer_type == "wordpiece":
+ self.subword_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+ elif subword_tokenizer_type == "character":
+ self.subword_tokenizer = CharacterTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+ elif subword_tokenizer_type == "sentencepiece":
+ self.subword_tokenizer = SentencepieceTokenizer(vocab=self.spm_file, unk_token=str(unk_token))
+ else:
+ raise ValueError(f"Invalid subword_tokenizer_type '{subword_tokenizer_type}' is specified.")
+ super().__init__(
+ spm_file=spm_file,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ pad_token=pad_token,
+ cls_token=cls_token,
+ mask_token=mask_token,
+ do_lower_case=do_lower_case,
+ do_word_tokenize=do_word_tokenize,
+ do_subword_tokenize=do_subword_tokenize,
+ word_tokenizer_type=word_tokenizer_type,
+ subword_tokenizer_type=subword_tokenizer_type,
+ never_split=never_split,
+ mecab_kwargs=mecab_kwargs,
+ sudachi_kwargs=sudachi_kwargs,
+ jumanpp_kwargs=jumanpp_kwargs,
+ **kwargs,
+ )
+
+ @property
+ def do_lower_case(self):
+ return self.lower_case
+
+ def __getstate__(self):
+ state = dict(self.__dict__)
+ if self.word_tokenizer_type in ["mecab", "sudachi", "jumanpp"]:
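+ # The mecab/sudachi/jumanpp word tokenizers wrap external analyzers that are not picklable,
+ # so they are dropped from the pickled state here and rebuilt in __setstate__.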
+ del state["word_tokenizer"]
+ return state
+
+ def __setstate__(self, state):
+ self.__dict__ = state
+ if self.word_tokenizer_type == "mecab":
+ self.word_tokenizer = MecabTokenizer(
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.mecab_kwargs or {})
+ )
+ elif self.word_tokenizer_type == "sudachi":
+ self.word_tokenizer = SudachiTokenizer(
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.sudachi_kwargs or {})
+ )
+ elif self.word_tokenizer_type == "jumanpp":
+ self.word_tokenizer = JumanppTokenizer(
+ do_lower_case=self.do_lower_case, never_split=self.never_split, **(self.jumanpp_kwargs or {})
+ )
+
+ def _tokenize(self, text):
+ if self.do_word_tokenize:
+ tokens = self.word_tokenizer.tokenize(text, never_split=self.all_special_tokens)
+ else:
+ tokens = [text]
+
+ if self.do_subword_tokenize:
+ split_tokens = [sub_token for token in tokens for sub_token in self.subword_tokenizer.tokenize(token)]
+ else:
+ split_tokens = tokens
+
+ return split_tokens
+
+ @property
+ def vocab_size(self):
+ if self.subword_tokenizer_type == "sentencepiece":
+ return len(self.subword_tokenizer.sp_model)
+ return len(self.vocab)
+
+ def get_vocab(self):
+ if self.subword_tokenizer_type == "sentencepiece":
+ vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
+ vocab.update(self.added_tokens_encoder)
+ return vocab
+ return dict(self.vocab, **self.added_tokens_encoder)
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ if self.subword_tokenizer_type == "sentencepiece":
+ return self.subword_tokenizer.sp_model.PieceToId(token)
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ if self.subword_tokenizer_type == "sentencepiece":
+ return self.subword_tokenizer.sp_model.IdToPiece(index)
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ if self.subword_tokenizer_type == "sentencepiece":
+ return self.subword_tokenizer.sp_model.decode(tokens)
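+ # For WordPiece and character vocabularies, join tokens with spaces and strip the "##"
+ # continuation prefix that marks non-initial WordPiece pieces.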
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.build_inputs_with_special_tokens
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
+ adding special tokens. A BERT sequence has the following format:
+
+ - single sequence: `[CLS] X [SEP]`
+ - pair of sequences: `[CLS] A [SEP] B [SEP]`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + token_ids_1 + sep
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.get_special_tokens_mask
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` method.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Whether or not the token list is already formatted with special tokens for the model.
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is not None:
+ return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1]
+
+ # Copied from transformers.models.bert.tokenization_bert.BertTokenizer.create_token_type_ids_from_sequences
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence
+ pair mask has the following format:
+
+ ```
+ 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
+ | first sequence | second sequence |
+ ```
+
+ If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ if os.path.isdir(save_directory):
+ if self.subword_tokenizer_type == "sentencepiece":
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["spm_file"]
+ )
+ else:
+ vocab_file = os.path.join(
+ save_directory,
+ (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"],
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+
+ if self.subword_tokenizer_type == "sentencepiece":
+ with open(vocab_file, "wb") as writer:
+ content_spiece_model = self.subword_tokenizer.sp_model.serialized_model_proto()
+ writer.write(content_spiece_model)
+ else:
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ index = 0
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
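+
+# Illustrative usage sketch (assumes a MeCab backend such as `fugashi` + `unidic-lite` is
+# installed; "cl-tohoku/bert-base-japanese" is used purely as an example checkpoint id):
+#
+#   tokenizer = BertJapaneseTokenizer.from_pretrained("cl-tohoku/bert-base-japanese")
+#   encoding = tokenizer("日本語のテキストをトークン化する。")
+#   tokens = tokenizer.convert_ids_to_tokens(encoding["input_ids"])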
+
+
+class MecabTokenizer:
+ """Runs basic tokenization with MeCab morphological parser."""
+
+ def __init__(
+ self,
+ do_lower_case=False,
+ never_split=None,
+ normalize_text=True,
+ mecab_dic: Optional[str] = "ipadic",
+ mecab_option: Optional[str] = None,
+ ):
+ """
+ Constructs a MecabTokenizer.
+
+ Args:
+ **do_lower_case**: (*optional*) boolean (default False)
+ Whether to lowercase the input.
+ **never_split**: (*optional*) list of str
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ **mecab_dic**: (*optional*) string (default "ipadic")
+ Name of dictionary to be used for MeCab initialization. If you are using a system-installed dictionary,
+ set this option to `None` and modify *mecab_option*.
+ **mecab_option**: (*optional*) string
+ String passed to MeCab constructor.
+ """
+ self.do_lower_case = do_lower_case
+ self.never_split = never_split if never_split is not None else []
+ self.normalize_text = normalize_text
+
+ try:
+ import fugashi
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "You need to install fugashi to use MecabTokenizer. "
+ "See https://pypi.org/project/fugashi/ for installation."
+ )
+
+ mecab_option = mecab_option or ""
+
+ if mecab_dic is not None:
+ if mecab_dic == "ipadic":
+ try:
+ import ipadic
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "The ipadic dictionary is not installed. "
+ "See https://github.com/polm/ipadic-py for installation."
+ )
+
+ dic_dir = ipadic.DICDIR
+
+ elif mecab_dic == "unidic_lite":
+ try:
+ import unidic_lite
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "The unidic_lite dictionary is not installed. "
+ "See https://github.com/polm/unidic-lite for installation."
+ )
+
+ dic_dir = unidic_lite.DICDIR
+
+ elif mecab_dic == "unidic":
+ try:
+ import unidic
+ except ModuleNotFoundError as error:
+ raise error.__class__(
+ "The unidic dictionary is not installed. "
+ "See https://github.com/polm/unidic-py for installation."
+ )
+
+ dic_dir = unidic.DICDIR
+ if not os.path.isdir(dic_dir):
+ raise RuntimeError(
+ "The unidic dictionary itself is not found. "
+ "See https://github.com/polm/unidic-py for installation."
+ )
+
+ else:
+ raise ValueError("Invalid mecab_dic is specified.")
+
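+ # Point MeCab at the bundled dictionary directory (-d) and its mecabrc (-r); any caller-supplied
+ # options are appended after these defaults.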
+ mecabrc = os.path.join(dic_dir, "mecabrc")
+ mecab_option = f'-d "{dic_dir}" -r "{mecabrc}" ' + mecab_option
+
+ self.mecab = fugashi.GenericTagger(mecab_option)
+
+ def tokenize(self, text, never_split=None, **kwargs):
+ """Tokenizes a piece of text."""
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ never_split = self.never_split + (never_split if never_split is not None else [])
+ tokens = []
+
+ for word in self.mecab(text):
+ token = word.surface
+
+ if self.do_lower_case and token not in never_split:
+ token = token.lower()
+
+ tokens.append(token)
+
+ return tokens
+
+
+class SudachiTokenizer:
+ """Runs basic tokenization with Sudachi morphological parser."""
+
+ def __init__(
+ self,
+ do_lower_case=False,
+ never_split=None,
+ normalize_text=True,
+ trim_whitespace=False,
+ sudachi_split_mode="A",
+ sudachi_config_path=None,
+ sudachi_resource_dir=None,
+ sudachi_dict_type="core",
+ sudachi_projection=None,
+ ):
+ """
+ Constructs a SudachiTokenizer.
+
+ Args:
+ **do_lower_case**: (*optional*) boolean (default False)
+ Whether to lowercase the input.
+ **never_split**: (*optional*) list of str
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ **trim_whitespace**: (*optional*) boolean (default False)
+ Whether to trim all whitespace, tab, newline from tokens.
+ **sudachi_split_mode**: (*optional*) string
+ Split mode of sudachi, choose from `["A", "B", "C"]`.
+ **sudachi_config_path**: (*optional*) string
+ **sudachi_resource_dir**: (*optional*) string
+ **sudachi_dict_type**: (*optional*) string
+ dict type of sudachi, choose from `["small", "core", "full"]`.
+ **sudachi_projection**: (*optional*) string
+ Word projection mode of sudachi, choose from `["surface", "normalized", "reading", "dictionary", "dictionary_and_surface", "normalized_and_surface", "normalized_nouns"]`.
+ """
+
+ self.do_lower_case = do_lower_case
+ self.never_split = never_split if never_split is not None else []
+ self.normalize_text = normalize_text
+ self.trim_whitespace = trim_whitespace
+
+ try:
+ from sudachipy import dictionary, tokenizer
+ except ImportError:
+ raise ImportError(
+ "You need to install sudachipy to use SudachiTokenizer. "
+ "See https://github.com/WorksApplications/SudachiPy for installation."
+ )
+
+ if sudachi_split_mode == "A":
+ self.split_mode = tokenizer.Tokenizer.SplitMode.A
+ elif sudachi_split_mode == "B":
+ self.split_mode = tokenizer.Tokenizer.SplitMode.B
+ elif sudachi_split_mode == "C":
+ self.split_mode = tokenizer.Tokenizer.SplitMode.C
+ else:
+ raise ValueError("Invalid sudachi_split_mode is specified.")
+
+ self.projection = sudachi_projection
+
+ sudachi_dictionary = dictionary.Dictionary(
+ config_path=sudachi_config_path, resource_dir=sudachi_resource_dir, dict=sudachi_dict_type
+ )
+ if is_sudachi_projection_available():
+ self.sudachi = sudachi_dictionary.create(self.split_mode, projection=self.projection)
+ elif self.projection is not None:
+ raise ImportError("You need to install sudachipy>=0.6.8 to specify the `projection` field in sudachi_kwargs.")
+ else:
+ self.sudachi = sudachi_dictionary.create(self.split_mode)
+
+ def tokenize(self, text, never_split=None, **kwargs):
+ """Tokenizes a piece of text."""
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ never_split = self.never_split + (never_split if never_split is not None else [])
+ tokens = []
+
+ for word in self.sudachi.tokenize(text):
+ token = word.surface()
+
+ if self.do_lower_case and token not in never_split:
+ token = token.lower()
+
+ if self.trim_whitespace:
+ if token.strip() == "":
+ continue
+ else:
+ token = token.strip()
+
+ tokens.append(token)
+
+ return tokens
+
+
+class JumanppTokenizer:
+ """Runs basic tokenization with jumanpp morphological parser."""
+
+ def __init__(
+ self,
+ do_lower_case=False,
+ never_split=None,
+ normalize_text=True,
+ trim_whitespace=False,
+ ):
+ """
+ Constructs a JumanppTokenizer.
+
+ Args:
+ **do_lower_case**: (*optional*) boolean (default False)
+ Whether to lowercase the input.
+ **never_split**: (*optional*) list of str
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ **trim_whitespace**: (*optional*) boolean (default False)
+ Whether to trim all whitespace, tab, newline from tokens.
+ """
+
+ self.do_lower_case = do_lower_case
+ self.never_split = never_split if never_split is not None else []
+ self.normalize_text = normalize_text
+ self.trim_whitespace = trim_whitespace
+
+ try:
+ import rhoknp
+ except ImportError:
+ raise ImportError(
+ "You need to install rhoknp to use JumanppTokenizer. "
+ "See https://github.com/ku-nlp/rhoknp for installation."
+ )
+
+ self.juman = rhoknp.Jumanpp()
+
+ def tokenize(self, text, never_split=None, **kwargs):
+ """Tokenizes a piece of text."""
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ text = text.strip()
+
+ never_split = self.never_split + (never_split if never_split is not None else [])
+ tokens = []
+
+ for mrph in self.juman.apply_to_sentence(text).morphemes:
+ token = mrph.text
+
+ if self.do_lower_case and token not in never_split:
+ token = token.lower()
+
+ if self.trim_whitespace:
+ if token.strip() == "":
+ continue
+ else:
+ token = token.strip()
+
+ tokens.append(token)
+
+ return tokens
+
+
+class CharacterTokenizer:
+ """Runs Character tokenization."""
+
+ def __init__(self, vocab, unk_token, normalize_text=True):
+ """
+ Constructs a CharacterTokenizer.
+
+ Args:
+ **vocab**:
+ Vocabulary object.
+ **unk_token**: str
+ A special symbol for out-of-vocabulary token.
+ **normalize_text**: (*optional*) boolean (default True)
+ Whether to apply unicode normalization to text before tokenization.
+ """
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.normalize_text = normalize_text
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into characters.
+
+ For example, `input = "apple"` will return as output `["a", "p", "p", "l", "e"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens.
+ This should have already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of characters.
+ """
+ if self.normalize_text:
+ text = unicodedata.normalize("NFKC", text)
+
+ output_tokens = []
+ for char in text:
+ if char not in self.vocab:
+ output_tokens.append(self.unk_token)
+ continue
+
+ output_tokens.append(char)
+
+ return output_tokens
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*)
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia.).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
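+# Illustrative behaviour of BasicTokenizer (a sketch, not part of the original module):
+#
+#     BasicTokenizer(do_lower_case=True).tokenize("Hello, World!")
+#     # -> ["hello", ",", "world", "!"]
+#
+#     # Tokens listed in never_split are neither lowercased nor split on punctuation:
+#     BasicTokenizer(never_split=["[CLS]"]).tokenize("[CLS] Hello")
+#     # -> ["[CLS]", "hello"]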
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
+
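+# Illustrative run of the greedy longest-match-first algorithm above (a sketch, not
+# part of the original module; the toy vocab is an assumption):
+#
+#     vocab = {"un", "##aff", "##able", "[UNK]"}
+#     WordpieceTokenizer(vocab=vocab, unk_token="[UNK]").tokenize("unaffable")
+#     # -> ["un", "##aff", "##able"]
+#     WordpieceTokenizer(vocab=vocab, unk_token="[UNK]").tokenize("banana")
+#     # -> ["[UNK]"] (no prefix of "banana" is in the vocab)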
+
+class SentencepieceTokenizer(object):
+ """
+ Runs sentencepiece tokenization. Based on transformers.models.albert.tokenization_albert.AlbertTokenizer.
+ """
+
+ def __init__(
+ self,
+ vocab,
+ unk_token,
+ do_lower_case=False,
+ remove_space=True,
+ keep_accents=True,
+ sp_model_kwargs: Optional[Dict[str, Any]] = None,
+ ):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.do_lower_case = do_lower_case
+ self.remove_space = remove_space
+ self.keep_accents = keep_accents
+
+ self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
+ self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
+ self.sp_model.Load(self.vocab)
+
+ def preprocess_text(self, inputs):
+ if self.remove_space:
+ outputs = " ".join(inputs.strip().split())
+ else:
+ outputs = inputs
+ outputs = outputs.replace("``", '"').replace("''", '"')
+
+ if not self.keep_accents:
+ outputs = unicodedata.normalize("NFKD", outputs)
+ outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
+ if self.do_lower_case:
+ outputs = outputs.lower()
+
+ return outputs
+
+ def tokenize(self, text):
+ """
+ Tokenizes text by sentencepiece. Based on [SentencePiece](https://github.com/google/sentencepiece).
+ Tokenization needs the given vocabulary.
+
+ Args:
+ text: A string to be tokenized.
+
+ Returns:
+ A list of sentencepiece tokens.
+ """
+ text = self.preprocess_text(text)
+ pieces = self.sp_model.encode(text, out_type=str)
+ new_pieces = []
+ for piece in pieces:
+ if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
+ cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
+ if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
+ if len(cur_pieces[0]) == 1:
+ cur_pieces = cur_pieces[1:]
+ else:
+ cur_pieces[0] = cur_pieces[0][1:]
+ cur_pieces.append(piece[-1])
+ new_pieces.extend(cur_pieces)
+ else:
+ new_pieces.append(piece)
+
+ return new_pieces
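+
+
+# Illustrative usage of SentencepieceTokenizer (a sketch, not part of the original
+# module; "spiece.model" is an assumed path to a trained SentencePiece model file):
+#
+#     sp_tokenizer = SentencepieceTokenizer(vocab="spiece.model", unk_token="<unk>")
+#     sp_tokenizer.tokenize("Hello world.")  # -> e.g. ["▁Hello", "▁world", "."]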
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..993a99c0819bd655544545e325940c8ac73f41a9
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__init__.py
@@ -0,0 +1,130 @@
+# Copyright 2020 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_flax_available,
+ is_tf_available,
+ is_tokenizers_available,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig"],
+ "tokenization_mpnet": ["MPNetTokenizer"],
+}
+
+try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["tokenization_mpnet_fast"] = ["MPNetTokenizerFast"]
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mpnet"] = [
+ "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MPNetForMaskedLM",
+ "MPNetForMultipleChoice",
+ "MPNetForQuestionAnswering",
+ "MPNetForSequenceClassification",
+ "MPNetForTokenClassification",
+ "MPNetLayer",
+ "MPNetModel",
+ "MPNetPreTrainedModel",
+ ]
+
+try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_tf_mpnet"] = [
+ "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "TFMPNetEmbeddings",
+ "TFMPNetForMaskedLM",
+ "TFMPNetForMultipleChoice",
+ "TFMPNetForQuestionAnswering",
+ "TFMPNetForSequenceClassification",
+ "TFMPNetForTokenClassification",
+ "TFMPNetMainLayer",
+ "TFMPNetModel",
+ "TFMPNetPreTrainedModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig
+ from .tokenization_mpnet import MPNetTokenizer
+
+ try:
+ if not is_tokenizers_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .tokenization_mpnet_fast import MPNetTokenizerFast
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mpnet import (
+ MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MPNetForMaskedLM,
+ MPNetForMultipleChoice,
+ MPNetForQuestionAnswering,
+ MPNetForSequenceClassification,
+ MPNetForTokenClassification,
+ MPNetLayer,
+ MPNetModel,
+ MPNetPreTrainedModel,
+ )
+
+ try:
+ if not is_tf_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_tf_mpnet import (
+ TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ TFMPNetEmbeddings,
+ TFMPNetForMaskedLM,
+ TFMPNetForMultipleChoice,
+ TFMPNetForQuestionAnswering,
+ TFMPNetForSequenceClassification,
+ TFMPNetForTokenClassification,
+ TFMPNetMainLayer,
+ TFMPNetModel,
+ TFMPNetPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..17ff8109fae6c9684aee9f96864f6e08d3268aba
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e65bf998616ab028eed329a3f84acb112456b9e8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/configuration_mpnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_mpnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_mpnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..89a45f48370366be9bf012444f3f65ea9de33a79
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_mpnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1aaf6429f638625c580ddb8734d0fa254bd57db5
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/modeling_tf_mpnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9ca6a7b579ccb7c6bebea2e21fe62f6071033be
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2860107af2323fb0ff281aa0a4e6207632c12bff
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpnet/__pycache__/tokenization_mpnet_fast.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py b/venv/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8cb07894bde1cfb5c0729e6fe45d66b2b79de14
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpnet/configuration_mpnet.py
@@ -0,0 +1,116 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" MPNet model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class MPNetConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`MPNetModel`] or a [`TFMPNetModel`]. It is used to
+ instantiate a MPNet model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the MPNet
+ [microsoft/mpnet-base](https://huggingface.co/microsoft/mpnet-base) architecture.
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ vocab_size (`int`, *optional*, defaults to 30527):
+ Vocabulary size of the MPNet model. Defines the number of different tokens that can be represented by the
+ `inputs_ids` passed when calling [`MPNetModel`] or [`TFMPNetModel`].
+ hidden_size (`int`, *optional*, defaults to 768):
+ Dimensionality of the encoder layers and the pooler layer.
+ num_hidden_layers (`int`, *optional*, defaults to 12):
+ Number of hidden layers in the Transformer encoder.
+ num_attention_heads (`int`, *optional*, defaults to 12):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ intermediate_size (`int`, *optional*, defaults to 3072):
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+ The dropout ratio for the attention probabilities.
+ max_position_embeddings (`int`, *optional*, defaults to 512):
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
+ just in case (e.g., 512 or 1024 or 2048).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+ The epsilon used by the layer normalization layers.
+ relative_attention_num_buckets (`int`, *optional*, defaults to 32):
+ The number of buckets to use for each attention layer.
+
+ Examples:
+
+ ```python
+ >>> from transformers import MPNetModel, MPNetConfig
+
+ >>> # Initializing a MPNet mpnet-base style configuration
+ >>> configuration = MPNetConfig()
+
+ >>> # Initializing a model from the mpnet-base style configuration
+ >>> model = MPNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```"""
+
+ model_type = "mpnet"
+
+ def __init__(
+ self,
+ vocab_size=30527,
+ hidden_size=768,
+ num_hidden_layers=12,
+ num_attention_heads=12,
+ intermediate_size=3072,
+ hidden_act="gelu",
+ hidden_dropout_prob=0.1,
+ attention_probs_dropout_prob=0.1,
+ max_position_embeddings=512,
+ initializer_range=0.02,
+ layer_norm_eps=1e-12,
+ relative_attention_num_buckets=32,
+ pad_token_id=1,
+ bos_token_id=0,
+ eos_token_id=2,
+ **kwargs,
+ ):
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
+
+ self.vocab_size = vocab_size
+ self.hidden_size = hidden_size
+ self.num_hidden_layers = num_hidden_layers
+ self.num_attention_heads = num_attention_heads
+ self.hidden_act = hidden_act
+ self.intermediate_size = intermediate_size
+ self.hidden_dropout_prob = hidden_dropout_prob
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
+ self.max_position_embeddings = max_position_embeddings
+ self.initializer_range = initializer_range
+ self.layer_norm_eps = layer_norm_eps
+ self.relative_attention_num_buckets = relative_attention_num_buckets
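+
+
+# Illustrative sketch (not part of the original module): any of the defaults above can
+# be overridden by keyword, e.g. to build a small configuration for unit tests.
+#
+#     tiny_config = MPNetConfig(
+#         hidden_size=64, num_hidden_layers=2, num_attention_heads=2, intermediate_size=128
+#     )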
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/modeling_mpnet.py b/venv/lib/python3.10/site-packages/transformers/models/mpnet/modeling_mpnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9b9f90d398d90aa200b4fb8c3d9bd9ad55e0bb6
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpnet/modeling_mpnet.py
@@ -0,0 +1,1052 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch MPNet model."""
+
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+from ...activations import ACT2FN, gelu
+from ...modeling_outputs import (
+ BaseModelOutput,
+ BaseModelOutputWithPooling,
+ MaskedLMOutput,
+ MultipleChoiceModelOutput,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutput,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
+from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
+from .configuration_mpnet import MPNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/mpnet-base"
+_CONFIG_FOR_DOC = "MPNetConfig"
+
+
+from ..deprecated._archive_maps import MPNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class MPNetPreTrainedModel(PreTrainedModel):
+ config_class = MPNetConfig
+ base_model_prefix = "mpnet"
+
+ def _init_weights(self, module):
+ """Initialize the weights"""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, nn.LayerNorm):
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+
+class MPNetEmbeddings(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.padding_idx = 1
+ self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=self.padding_idx)
+ self.position_embeddings = nn.Embedding(
+ config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx
+ )
+
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.register_buffer(
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+ )
+
+ def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, **kwargs):
+ if position_ids is None:
+ if input_ids is not None:
+ position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx)
+ else:
+ position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds)
+
+ if input_ids is not None:
+ input_shape = input_ids.size()
+ else:
+ input_shape = inputs_embeds.size()[:-1]
+
+ seq_length = input_shape[1]
+
+ if position_ids is None:
+ position_ids = self.position_ids[:, :seq_length]
+
+ if inputs_embeds is None:
+ inputs_embeds = self.word_embeddings(input_ids)
+ position_embeddings = self.position_embeddings(position_ids)
+
+ embeddings = inputs_embeds + position_embeddings
+ embeddings = self.LayerNorm(embeddings)
+ embeddings = self.dropout(embeddings)
+ return embeddings
+
+ def create_position_ids_from_inputs_embeds(self, inputs_embeds):
+ """
+ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids.
+
+ Args:
+ inputs_embeds: torch.Tensor
+
+ Returns: torch.Tensor
+ """
+ input_shape = inputs_embeds.size()[:-1]
+ sequence_length = input_shape[1]
+
+ position_ids = torch.arange(
+ self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
+ )
+ return position_ids.unsqueeze(0).expand(input_shape)
+
+
+class MPNetSelfAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads})"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.q = nn.Linear(config.hidden_size, self.all_head_size)
+ self.k = nn.Linear(config.hidden_size, self.all_head_size)
+ self.v = nn.Linear(config.hidden_size, self.all_head_size)
+ self.o = nn.Linear(config.hidden_size, config.hidden_size)
+
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
+
+ def transpose_for_scores(self, x):
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
+ x = x.view(*new_x_shape)
+ return x.permute(0, 2, 1, 3)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ **kwargs,
+ ):
+ q = self.q(hidden_states)
+ k = self.k(hidden_states)
+ v = self.v(hidden_states)
+
+ q = self.transpose_for_scores(q)
+ k = self.transpose_for_scores(k)
+ v = self.transpose_for_scores(v)
+
+ # Take the dot product between "query" and "key" to get the raw attention scores.
+ attention_scores = torch.matmul(q, k.transpose(-1, -2))
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
+
+ # Apply relative position embedding (precomputed in MPNetEncoder) if provided.
+ if position_bias is not None:
+ attention_scores += position_bias
+
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ # Normalize the attention scores to probabilities.
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
+
+ attention_probs = self.dropout(attention_probs)
+
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ c = torch.matmul(attention_probs, v)
+
+ c = c.permute(0, 2, 1, 3).contiguous()
+ new_c_shape = c.size()[:-2] + (self.all_head_size,)
+ c = c.view(*new_c_shape)
+
+ o = self.o(c)
+
+ outputs = (o, attention_probs) if output_attentions else (o,)
+ return outputs
+
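+# Shape sketch for MPNetSelfAttention above (illustrative, not part of the original
+# module): with batch size B, sequence length L, H attention heads and head size D,
+# transpose_for_scores maps (B, L, H*D) -> (B, H, L, D), the score matrix
+# q @ k^T / sqrt(D) has shape (B, H, L, L), and position_bias / attention_mask are
+# broadcast-added to it before the softmax.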
+
+class MPNetAttention(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attn = MPNetSelfAttention(config)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ self.pruned_heads = set()
+
+ def prune_heads(self, heads):
+ if len(heads) == 0:
+ return
+ heads, index = find_pruneable_heads_and_indices(
+ heads, self.attn.num_attention_heads, self.attn.attention_head_size, self.pruned_heads
+ )
+
+ self.attn.q = prune_linear_layer(self.attn.q, index)
+ self.attn.k = prune_linear_layer(self.attn.k, index)
+ self.attn.v = prune_linear_layer(self.attn.v, index)
+ self.attn.o = prune_linear_layer(self.attn.o, index, dim=1)
+
+ self.attn.num_attention_heads = self.attn.num_attention_heads - len(heads)
+ self.attn.all_head_size = self.attn.attention_head_size * self.attn.num_attention_heads
+ self.pruned_heads = self.pruned_heads.union(heads)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ **kwargs,
+ ):
+ self_outputs = self.attn(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ position_bias,
+ output_attentions=output_attentions,
+ )
+ attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + hidden_states)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+
+# Copied from transformers.models.bert.modeling_bert.BertIntermediate
+class MPNetIntermediate(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
+ else:
+ self.intermediate_act_fn = config.hidden_act
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+ return hidden_states
+
+
+# Copied from transformers.models.bert.modeling_bert.BertOutput
+class MPNetOutput(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
+ self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.LayerNorm(hidden_states + input_tensor)
+ return hidden_states
+
+
+class MPNetLayer(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.attention = MPNetAttention(config)
+ self.intermediate = MPNetIntermediate(config)
+ self.output = MPNetOutput(config)
+
+ def forward(
+ self,
+ hidden_states,
+ attention_mask=None,
+ head_mask=None,
+ position_bias=None,
+ output_attentions=False,
+ **kwargs,
+ ):
+ self_attention_outputs = self.attention(
+ hidden_states,
+ attention_mask,
+ head_mask,
+ position_bias=position_bias,
+ output_attentions=output_attentions,
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.output(intermediate_output, attention_output)
+ outputs = (layer_output,) + outputs
+ return outputs
+
+
+class MPNetEncoder(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.config = config
+ self.n_heads = config.num_attention_heads
+ self.layer = nn.ModuleList([MPNetLayer(config) for _ in range(config.num_hidden_layers)])
+ self.relative_attention_bias = nn.Embedding(config.relative_attention_num_buckets, self.n_heads)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ attention_mask: Optional[torch.Tensor] = None,
+ head_mask: Optional[torch.Tensor] = None,
+ output_attentions: bool = False,
+ output_hidden_states: bool = False,
+ return_dict: bool = False,
+ **kwargs,
+ ):
+ position_bias = self.compute_position_bias(hidden_states)
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ head_mask[i],
+ position_bias,
+ output_attentions=output_attentions,
+ **kwargs,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+ return BaseModelOutput(
+ last_hidden_state=hidden_states,
+ hidden_states=all_hidden_states,
+ attentions=all_attentions,
+ )
+
+ def compute_position_bias(self, x, position_ids=None, num_buckets=32):
+ bsz, qlen, klen = x.size(0), x.size(1), x.size(1)
+ if position_ids is not None:
+ context_position = position_ids[:, :, None]
+ memory_position = position_ids[:, None, :]
+ else:
+ context_position = torch.arange(qlen, dtype=torch.long)[:, None]
+ memory_position = torch.arange(klen, dtype=torch.long)[None, :]
+
+ relative_position = memory_position - context_position
+
+ rp_bucket = self.relative_position_bucket(relative_position, num_buckets=num_buckets)
+ rp_bucket = rp_bucket.to(x.device)
+ values = self.relative_attention_bias(rp_bucket)
+ values = values.permute([2, 0, 1]).unsqueeze(0)
+ values = values.expand((bsz, -1, qlen, klen)).contiguous()
+ return values
+
+ @staticmethod
+ def relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
+ ret = 0
+ n = -relative_position
+
+ num_buckets //= 2
+ ret += (n < 0).to(torch.long) * num_buckets
+ n = torch.abs(n)
+
+ max_exact = num_buckets // 2
+ is_small = n < max_exact
+
+ val_if_large = max_exact + (
+ torch.log(n.float() / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact)
+ ).to(torch.long)
+
+ val_if_large = torch.min(val_if_large, torch.full_like(val_if_large, num_buckets - 1))
+ ret += torch.where(is_small, n, val_if_large)
+ return ret
+
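+# Illustrative values for relative_position_bucket above (num_buckets=32,
+# max_distance=128; not part of the original module): relative positions -3, 0 and +3
+# map to buckets 3, 0 and 19. Small offsets get exact buckets, the sign of the offset
+# selects the lower/upper half of the bucket range, and larger offsets are binned
+# logarithmically up to bucket num_buckets - 1.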
+
+# Copied from transformers.models.bert.modeling_bert.BertPooler
+class MPNetPooler(nn.Module):
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.activation = nn.Tanh()
+
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(first_token_tensor)
+ pooled_output = self.activation(pooled_output)
+ return pooled_output
+
+
+MPNET_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MPNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MPNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.",
+ MPNET_START_DOCSTRING,
+)
+class MPNetModel(MPNetPreTrainedModel):
+ def __init__(self, config, add_pooling_layer=True):
+ super().__init__(config)
+ self.config = config
+
+ self.embeddings = MPNetEmbeddings(config)
+ self.encoder = MPNetEncoder(config)
+ self.pooler = MPNetPooler(config) if add_pooling_layer else None
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.embeddings.word_embeddings
+
+ def set_input_embeddings(self, value):
+ self.embeddings.word_embeddings = value
+
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See the base
+ class PreTrainedModel.
+ """
+ for layer, heads in heads_to_prune.items():
+ self.encoder.layer[layer].attention.prune_heads(heads)
+
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPooling,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **kwargs,
+ ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPooling]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
+ input_shape = input_ids.size()
+ elif inputs_embeds is not None:
+ input_shape = inputs_embeds.size()[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ device = input_ids.device if input_ids is not None else inputs_embeds.device
+
+ if attention_mask is None:
+ attention_mask = torch.ones(input_shape, device=device)
+ extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
+
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
+ embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds)
+ encoder_outputs = self.encoder(
+ embedding_output,
+ attention_mask=extended_attention_mask,
+ head_mask=head_mask,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
+
+ if not return_dict:
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
+
+ return BaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
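+# Illustrative usage of MPNetModel (a sketch, not part of the original module; assumes
+# the "microsoft/mpnet-base" checkpoint is available):
+#
+#     from transformers import AutoTokenizer, MPNetModel
+#     tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
+#     model = MPNetModel.from_pretrained("microsoft/mpnet-base")
+#     inputs = tokenizer("Hello world", return_tensors="pt")
+#     outputs = model(**inputs)
+#     outputs.last_hidden_state.shape  # -> torch.Size([1, seq_len, 768])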
+
+class MPNetForMaskedLM(MPNetPreTrainedModel):
+ _tied_weights_keys = ["lm_head.decoder"]
+
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
+ self.lm_head = MPNetLMHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head.decoder
+
+ def set_output_embeddings(self, new_embeddings):
+ self.lm_head.decoder = new_embeddings
+
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked); the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ masked_lm_loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
+
+ return MaskedLMOutput(
+ loss=masked_lm_loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
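+# Illustrative labels convention for MPNetForMaskedLM above (a sketch, not part of the
+# original module; `inputs` is assumed to come from a tokenizer call and `mlm_model`
+# to be an MPNetForMaskedLM instance): only positions with labels in [0, vocab_size)
+# contribute to the loss, everything else is set to -100.
+#
+#     labels = inputs.input_ids.clone()
+#     labels[inputs.input_ids != tokenizer.mask_token_id] = -100
+#     outputs = mlm_model(**inputs, labels=labels)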
+
+class MPNetLMHead(nn.Module):
+ """MPNet Head for masked and permuted language modeling."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
+
+ self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+ self.bias = nn.Parameter(torch.zeros(config.vocab_size))
+
+ # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
+ self.decoder.bias = self.bias
+
+ def forward(self, features, **kwargs):
+ x = self.dense(features)
+ x = gelu(x)
+ x = self.layer_norm(x)
+
+ # project back to size of vocabulary with bias
+ x = self.decoder(x)
+
+ return x
+
+
+@add_start_docstrings(
+ """
+ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ MPNET_START_DOCSTRING,
+)
+class MPNetForSequenceClassification(MPNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
+ self.classifier = MPNetClassificationHead(config)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(logits, labels)
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
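+# Summary of the problem_type inference above (illustrative, not part of the original
+# module): num_labels == 1 is treated as regression (MSELoss); num_labels > 1 with
+# integer labels is single-label classification (CrossEntropyLoss); float multi-hot
+# labels fall back to multi-label classification (BCEWithLogitsLoss).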
+
+@add_start_docstrings(
+ """
+ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ MPNET_START_DOCSTRING,
+)
+class MPNetForMultipleChoice(MPNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.mpnet = MPNetModel(config)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, 1)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=MultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
+ num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
+ `input_ids` above)
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
+
+ flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
+ flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
+ flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
+ flat_inputs_embeds = (
+ inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
+ if inputs_embeds is not None
+ else None
+ )
+
+ outputs = self.mpnet(
+ flat_input_ids,
+ position_ids=flat_position_ids,
+ attention_mask=flat_attention_mask,
+ head_mask=head_mask,
+ inputs_embeds=flat_inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ pooled_output = outputs[1]
+
+ pooled_output = self.dropout(pooled_output)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = logits.view(-1, num_choices)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(reshaped_logits, labels)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return MultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
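+# Shape sketch for MPNetForMultipleChoice above (illustrative, not part of the original
+# module): inputs of shape (batch_size, num_choices, seq_len) are flattened to
+# (batch_size * num_choices, seq_len) before the encoder, and the per-choice logits of
+# shape (batch_size * num_choices, 1) are reshaped back to (batch_size, num_choices)
+# for the cross-entropy over choices.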
+
+@add_start_docstrings(
+ """
+ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ MPNET_START_DOCSTRING,
+)
+class MPNetForTokenClassification(MPNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ labels: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output)
+ logits = self.classifier(sequence_output)
+
+ loss = None
+ if labels is not None:
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+class MPNetClassificationHead(nn.Module):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config):
+ super().__init__()
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
+ self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
+
+ def forward(self, features, **kwargs):
+ x = features[:, 0, :] # take the <s> token (equiv. to BERT's [CLS] token)
+ x = self.dropout(x)
+ x = self.dense(x)
+ x = torch.tanh(x)
+ x = self.dropout(x)
+ x = self.out_proj(x)
+ return x
+
+
+@add_start_docstrings(
+ """
+ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MPNET_START_DOCSTRING,
+)
+class MPNetForQuestionAnswering(MPNetPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+
+ self.num_labels = config.num_labels
+ self.mpnet = MPNetModel(config, add_pooling_layer=False)
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=QuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ position_ids: Optional[torch.LongTensor] = None,
+ head_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, the split adds an extra dimension; squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+
+def create_position_ids_from_input_ids(input_ids, padding_idx):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
+ are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: torch.Tensor
+ padding_idx: int
+
+ Returns: torch.Tensor
+ """
+ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
+ mask = input_ids.ne(padding_idx).int()
+ incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
+ return incremental_indices.long() + padding_idx
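+
+
+ # Illustrative usage sketch (not part of the upstream module): with padding_idx=1,
+ # non-padding tokens are numbered from padding_idx + 1 while pad positions keep
+ # padding_idx, e.g.
+ #
+ # >>> import torch
+ # >>> ids = torch.tensor([[0, 5, 7, 1, 1]])
+ # >>> create_position_ids_from_input_ids(ids, padding_idx=1)
+ # tensor([[2, 3, 4, 1, 1]])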
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/modeling_tf_mpnet.py b/venv/lib/python3.10/site-packages/transformers/models/mpnet/modeling_tf_mpnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..b57132d81398d02998983807fc53fe6421ce5380
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpnet/modeling_tf_mpnet.py
@@ -0,0 +1,1345 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" TF 2.0 MPNet model."""
+
+
+from __future__ import annotations
+
+import math
+import warnings
+from typing import Optional, Tuple, Union
+
+import numpy as np
+import tensorflow as tf
+
+from ...activations_tf import get_tf_activation
+from ...modeling_tf_outputs import (
+ TFBaseModelOutput,
+ TFBaseModelOutputWithPooling,
+ TFMaskedLMOutput,
+ TFMultipleChoiceModelOutput,
+ TFQuestionAnsweringModelOutput,
+ TFSequenceClassifierOutput,
+ TFTokenClassifierOutput,
+)
+from ...modeling_tf_utils import (
+ TFMaskedLanguageModelingLoss,
+ TFModelInputType,
+ TFMultipleChoiceLoss,
+ TFPreTrainedModel,
+ TFQuestionAnsweringLoss,
+ TFSequenceClassificationLoss,
+ TFTokenClassificationLoss,
+ get_initializer,
+ keras,
+ keras_serializable,
+ unpack_inputs,
+)
+from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
+from ...utils import (
+ add_code_sample_docstrings,
+ add_start_docstrings,
+ add_start_docstrings_to_model_forward,
+ logging,
+)
+from .configuration_mpnet import MPNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "microsoft/mpnet-base"
+_CONFIG_FOR_DOC = "MPNetConfig"
+
+
+from ..deprecated._archive_maps import TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+class TFMPNetPreTrainedModel(TFPreTrainedModel):
+ """
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
+ models.
+ """
+
+ config_class = MPNetConfig
+ base_model_prefix = "mpnet"
+
+
+class TFMPNetEmbeddings(keras.layers.Layer):
+ """Construct the embeddings from word, position embeddings."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.padding_idx = 1
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.max_position_embeddings = config.max_position_embeddings
+ self.initializer_range = config.initializer_range
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+
+ def build(self, input_shape=None):
+ with tf.name_scope("word_embeddings"):
+ self.weight = self.add_weight(
+ name="weight",
+ shape=[self.config.vocab_size, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ with tf.name_scope("position_embeddings"):
+ self.position_embeddings = self.add_weight(
+ name="embeddings",
+ shape=[self.max_position_embeddings, self.hidden_size],
+ initializer=get_initializer(initializer_range=self.initializer_range),
+ )
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+ def create_position_ids_from_input_ids(self, input_ids):
+ """
+ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding
+ symbols are ignored. This is modified from fairseq's `utils.make_positions`.
+
+ Args:
+ input_ids: tf.Tensor
+ Returns: tf.Tensor
+ """
+ mask = tf.cast(tf.math.not_equal(input_ids, self.padding_idx), dtype=input_ids.dtype)
+ incremental_indices = tf.math.cumsum(mask, axis=1) * mask
+
+ return incremental_indices + self.padding_idx
+
+ def call(self, input_ids=None, position_ids=None, inputs_embeds=None, training=False):
+ """
+ Applies embedding based on inputs tensor.
+
+ Returns:
+ final_embeddings (`tf.Tensor`): output embedding tensor.
+ """
+ assert not (input_ids is None and inputs_embeds is None)
+
+ if input_ids is not None:
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
+
+ input_shape = shape_list(inputs_embeds)[:-1]
+
+ if position_ids is None:
+ if input_ids is not None:
+ # Create the position ids from the input token ids. Any padded tokens remain padded.
+ position_ids = self.create_position_ids_from_input_ids(input_ids=input_ids)
+ else:
+ position_ids = tf.expand_dims(
+ tf.range(start=self.padding_idx + 1, limit=input_shape[-1] + self.padding_idx + 1), axis=0
+ )
+
+ position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
+ final_embeddings = inputs_embeds + position_embeds
+ final_embeddings = self.LayerNorm(inputs=final_embeddings)
+ final_embeddings = self.dropout(inputs=final_embeddings, training=training)
+
+ return final_embeddings
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertPooler with Bert->MPNet
+class TFMPNetPooler(keras.layers.Layer):
+ def __init__(self, config: MPNetConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ # We "pool" the model by simply taking the hidden state corresponding
+ # to the first token.
+ first_token_tensor = hidden_states[:, 0]
+ pooled_output = self.dense(inputs=first_token_tensor)
+
+ return pooled_output
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+class TFMPNetSelfAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ if config.hidden_size % config.num_attention_heads != 0:
+ raise ValueError(
+ f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
+ f"heads ({config.num_attention_heads}"
+ )
+
+ self.num_attention_heads = config.num_attention_heads
+ assert config.hidden_size % config.num_attention_heads == 0
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
+
+ self.q = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="q"
+ )
+ self.k = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="k"
+ )
+ self.v = keras.layers.Dense(
+ self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="v"
+ )
+ self.o = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="o"
+ )
+ self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
+ self.config = config
+
+ def transpose_for_scores(self, x, batch_size):
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
+ x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
+
+ return tf.transpose(x, perm=[0, 2, 1, 3])
+
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
+ batch_size = shape_list(hidden_states)[0]
+
+ q = self.q(hidden_states)
+ k = self.k(hidden_states)
+ v = self.v(hidden_states)
+
+ q = self.transpose_for_scores(q, batch_size)
+ k = self.transpose_for_scores(k, batch_size)
+ v = self.transpose_for_scores(v, batch_size)
+
+ attention_scores = tf.matmul(q, k, transpose_b=True)
+ dk = tf.cast(shape_list(k)[-1], attention_scores.dtype)
+ attention_scores = attention_scores / tf.math.sqrt(dk)
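+ # Descriptive note: the division above is the usual scaled dot-product scaling by
+ # the square root of the per-head dimension.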
+
+ # Apply relative position embedding (precomputed in MPNetEncoder) if provided.
+ if position_bias is not None:
+ attention_scores += position_bias
+
+ if attention_mask is not None:
+ attention_scores = attention_scores + attention_mask
+
+ attention_probs = stable_softmax(attention_scores, axis=-1)
+
+ attention_probs = self.dropout(attention_probs, training=training)
+
+ if head_mask is not None:
+ attention_probs = attention_probs * head_mask
+
+ c = tf.matmul(attention_probs, v)
+ c = tf.transpose(c, perm=[0, 2, 1, 3])
+ c = tf.reshape(c, (batch_size, -1, self.all_head_size))
+ o = self.o(c)
+
+ outputs = (o, attention_probs) if output_attentions else (o,)
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "q", None) is not None:
+ with tf.name_scope(self.q.name):
+ self.q.build([None, None, self.config.hidden_size])
+ if getattr(self, "k", None) is not None:
+ with tf.name_scope(self.k.name):
+ self.k.build([None, None, self.config.hidden_size])
+ if getattr(self, "v", None) is not None:
+ with tf.name_scope(self.v.name):
+ self.v.build([None, None, self.config.hidden_size])
+ if getattr(self, "o", None) is not None:
+ with tf.name_scope(self.o.name):
+ self.o.build([None, None, self.config.hidden_size])
+
+
+class TFMPNetAttention(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attn = TFMPNetSelfAttention(config, name="attn")
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.config = config
+
+ def prune_heads(self, heads):
+ raise NotImplementedError
+
+ def call(self, input_tensor, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
+ self_outputs = self.attn(
+ input_tensor, attention_mask, head_mask, output_attentions, position_bias=position_bias, training=training
+ )
+ attention_output = self.LayerNorm(self.dropout(self_outputs[0]) + input_tensor)
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attn", None) is not None:
+ with tf.name_scope(self.attn.name):
+ self.attn.build(None)
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->MPNet
+class TFMPNetIntermediate(keras.layers.Layer):
+ def __init__(self, config: MPNetConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+
+ if isinstance(config.hidden_act, str):
+ self.intermediate_act_fn = get_tf_activation(config.hidden_act)
+ else:
+ self.intermediate_act_fn = config.hidden_act
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.intermediate_act_fn(hidden_states)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+
+
+# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->MPNet
+class TFMPNetOutput(keras.layers.Layer):
+ def __init__(self, config: MPNetConfig, **kwargs):
+ super().__init__(**kwargs)
+
+ self.dense = keras.layers.Dense(
+ units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
+ self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
+ self.config = config
+
+ def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
+ hidden_states = self.dense(inputs=hidden_states)
+ hidden_states = self.dropout(inputs=hidden_states, training=training)
+ hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
+
+ return hidden_states
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.intermediate_size])
+ if getattr(self, "LayerNorm", None) is not None:
+ with tf.name_scope(self.LayerNorm.name):
+ self.LayerNorm.build([None, None, self.config.hidden_size])
+
+
+class TFMPNetLayer(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.attention = TFMPNetAttention(config, name="attention")
+ self.intermediate = TFMPNetIntermediate(config, name="intermediate")
+ self.out = TFMPNetOutput(config, name="output")
+
+ def call(self, hidden_states, attention_mask, head_mask, output_attentions, position_bias=None, training=False):
+ self_attention_outputs = self.attention(
+ hidden_states, attention_mask, head_mask, output_attentions, position_bias=position_bias, training=training
+ )
+ attention_output = self_attention_outputs[0]
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
+
+ intermediate_output = self.intermediate(attention_output)
+ layer_output = self.out(intermediate_output, attention_output, training=training)
+ outputs = (layer_output,) + outputs # add attentions if we output them
+
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "attention", None) is not None:
+ with tf.name_scope(self.attention.name):
+ self.attention.build(None)
+ if getattr(self, "intermediate", None) is not None:
+ with tf.name_scope(self.intermediate.name):
+ self.intermediate.build(None)
+ if getattr(self, "out", None) is not None:
+ with tf.name_scope(self.out.name):
+ self.out.build(None)
+
+
+class TFMPNetEncoder(keras.layers.Layer):
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.n_heads = config.num_attention_heads
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.relative_attention_num_buckets = config.relative_attention_num_buckets
+ self.initializer_range = config.initializer_range
+
+ self.layer = [TFMPNetLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ with tf.name_scope("relative_attention_bias"):
+ self.relative_attention_bias = self.add_weight(
+ name="embeddings",
+ shape=[self.relative_attention_num_buckets, self.n_heads],
+ initializer=get_initializer(self.initializer_range),
+ )
+ if getattr(self, "layer", None) is not None:
+ for layer in self.layer:
+ with tf.name_scope(layer.name):
+ layer.build(None)
+
+ def call(
+ self,
+ hidden_states,
+ attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=False,
+ ):
+ position_bias = self.compute_position_bias(hidden_states)
+ all_hidden_states = () if output_hidden_states else None
+ all_attentions = () if output_attentions else None
+
+ for i, layer_module in enumerate(self.layer):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ layer_outputs = layer_module(
+ hidden_states,
+ attention_mask,
+ head_mask[i],
+ output_attentions,
+ position_bias=position_bias,
+ training=training,
+ )
+ hidden_states = layer_outputs[0]
+
+ if output_attentions:
+ all_attentions = all_attentions + (layer_outputs[1],)
+
+ # Add last layer
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
+
+ return TFBaseModelOutput(
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
+ )
+
+ @staticmethod
+ def _relative_position_bucket(relative_position, num_buckets=32, max_distance=128):
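+ # Descriptive note: maps signed relative distances to bucket ids, T5-style. Half of
+ # the buckets encode the sign (key before vs. after the query), small absolute
+ # offsets get exact buckets, and larger offsets are binned logarithmically up to
+ # max_distance.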
+ ret = 0
+ n = -relative_position
+
+ num_buckets //= 2
+ ret += tf.cast(tf.math.less(n, 0), dtype=relative_position.dtype) * num_buckets
+ n = tf.math.abs(n)
+
+ # now n is in the range [0, inf)
+ max_exact = num_buckets // 2
+ is_small = tf.math.less(n, max_exact)
+
+ val_if_large = max_exact + tf.cast(
+ tf.math.log(n / max_exact) / math.log(max_distance / max_exact) * (num_buckets - max_exact),
+ dtype=relative_position.dtype,
+ )
+
+ val_if_large = tf.math.minimum(val_if_large, num_buckets - 1)
+ ret += tf.where(is_small, n, val_if_large)
+ return ret
+
+ def compute_position_bias(self, x, position_ids=None):
+ """Compute binned relative position bias"""
+ input_shape = shape_list(x)
+ qlen, klen = input_shape[1], input_shape[1]
+
+ if position_ids is not None:
+ context_position = position_ids[:, :, None]
+ memory_position = position_ids[:, None, :]
+ else:
+ context_position = tf.range(qlen)[:, None]
+ memory_position = tf.range(klen)[None, :]
+
+ relative_position = memory_position - context_position # shape (qlen, klen)
+
+ rp_bucket = self._relative_position_bucket(
+ relative_position,
+ num_buckets=self.relative_attention_num_buckets,
+ )
+ values = tf.gather(self.relative_attention_bias, rp_bucket) # shape (qlen, klen, num_heads)
+ values = tf.expand_dims(tf.transpose(values, [2, 0, 1]), axis=0) # shape (1, num_heads, qlen, klen)
+ return values
+
+
+@keras_serializable
+class TFMPNetMainLayer(keras.layers.Layer):
+ config_class = MPNetConfig
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.num_hidden_layers = config.num_hidden_layers
+ self.initializer_range = config.initializer_range
+ self.output_attentions = config.output_attentions
+ self.output_hidden_states = config.output_hidden_states
+ self.return_dict = config.use_return_dict
+ self.encoder = TFMPNetEncoder(config, name="encoder")
+ self.pooler = TFMPNetPooler(config, name="pooler")
+ # The embeddings must be the last declaration in order to follow the weights order
+ self.embeddings = TFMPNetEmbeddings(config, name="embeddings")
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.get_input_embeddings
+ def get_input_embeddings(self) -> keras.layers.Layer:
+ return self.embeddings
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer.set_input_embeddings
+ def set_input_embeddings(self, value: tf.Variable):
+ self.embeddings.weight = value
+ self.embeddings.vocab_size = shape_list(value)[0]
+
+ # Copied from transformers.models.bert.modeling_tf_bert.TFBertMainLayer._prune_heads
+ def _prune_heads(self, heads_to_prune):
+ """
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
+ class PreTrainedModel
+ """
+ raise NotImplementedError
+
+ @unpack_inputs
+ def call(
+ self,
+ input_ids=None,
+ attention_mask=None,
+ position_ids=None,
+ head_mask=None,
+ inputs_embeds=None,
+ output_attentions=None,
+ output_hidden_states=None,
+ return_dict=None,
+ training=False,
+ ):
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ input_shape = shape_list(input_ids)
+ elif inputs_embeds is not None:
+ input_shape = shape_list(inputs_embeds)[:-1]
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if attention_mask is None:
+ attention_mask = tf.fill(input_shape, 1)
+
+ embedding_output = self.embeddings(
+ input_ids,
+ position_ids,
+ inputs_embeds,
+ training=training,
+ )
+
+ # We create a 3D attention mask from a 2D tensor mask.
+ # Sizes are [batch_size, 1, 1, to_seq_length]
+ # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
+ # this attention mask is simpler than the triangular masking of causal attention
+ # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
+ extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
+
+ # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+ # masked positions, this operation will create a tensor which is 0.0 for
+ # positions we want to attend and -10000.0 for masked positions.
+ # Since we are adding it to the raw scores before the softmax, this is
+ # effectively the same as removing these entirely.
+ extended_attention_mask = tf.cast(extended_attention_mask, embedding_output.dtype)
+ one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
+ ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
+ extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
+
+ # Prepare head mask if needed
+ # 1.0 in head_mask indicates we keep the head
+ # attention_probs has shape bsz x n_heads x N x N
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
+ if head_mask is not None:
+ raise NotImplementedError
+ else:
+ head_mask = [None] * self.num_hidden_layers
+
+ encoder_outputs = self.encoder(
+ embedding_output,
+ extended_attention_mask,
+ head_mask,
+ output_attentions,
+ output_hidden_states,
+ return_dict,
+ training=training,
+ )
+
+ sequence_output = encoder_outputs[0]
+ pooled_output = self.pooler(sequence_output)
+
+ if not return_dict:
+ return (
+ sequence_output,
+ pooled_output,
+ ) + encoder_outputs[1:]
+
+ return TFBaseModelOutputWithPooling(
+ last_hidden_state=sequence_output,
+ pooler_output=pooled_output,
+ hidden_states=encoder_outputs.hidden_states,
+ attentions=encoder_outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "encoder", None) is not None:
+ with tf.name_scope(self.encoder.name):
+ self.encoder.build(None)
+ if getattr(self, "pooler", None) is not None:
+ with tf.name_scope(self.pooler.name):
+ self.pooler.build(None)
+ if getattr(self, "embeddings", None) is not None:
+ with tf.name_scope(self.embeddings.name):
+ self.embeddings.build(None)
+
+
+MPNET_START_DOCSTRING = r"""
+
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
+ behavior.
+
+
+
+ TensorFlow models and layers in `transformers` accept two formats as input:
+
+ - having all inputs as keyword arguments (like PyTorch models), or
+ - having all inputs as a list, tuple or dict in the first positional argument.
+
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
+ positional argument:
+
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
+
+ Note that when creating models and layers with
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
+ about any of this, as you can just pass inputs like you would to any other Python function!
+
+
+
+ Args:
+ config ([`MPNetConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MPNET_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
+ Indices of input sequence tokens in the vocabulary.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
+ [`PreTrainedTokenizer.encode`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+ position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
+ config.max_position_embeddings - 1]`.
+
+ [What are position IDs?](../glossary#position-ids)
+ head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
+
+ - 1 indicates the head is **not masked**,
+ - 0 indicates the head is **masked**.
+
+ inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+ config will be used instead.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
+ used instead.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
+ eager mode, in graph mode the value will always be set to True.
+ training (`bool`, *optional*, defaults to `False`):
+ Whether or not to use the model in training mode (some modules like dropout modules have different
+ behaviors between training and evaluation).
+"""
+
+
+@add_start_docstrings(
+ "The bare MPNet Model transformer outputting raw hidden-states without any specific head on top.",
+ MPNET_START_DOCSTRING,
+)
+class TFMPNetModel(TFMPNetPreTrainedModel):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFBaseModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ training: bool = False,
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
+ outputs = self.mpnet(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ return outputs
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mpnet", None) is not None:
+ with tf.name_scope(self.mpnet.name):
+ self.mpnet.build(None)
+
+
+class TFMPNetLMHead(keras.layers.Layer):
+ """MPNet head for masked and permuted language modeling"""
+
+ def __init__(self, config, input_embeddings, **kwargs):
+ super().__init__(**kwargs)
+
+ self.config = config
+ self.hidden_size = config.hidden_size
+ self.dense = keras.layers.Dense(
+ config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
+ )
+ self.layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm")
+ self.act = get_tf_activation("gelu")
+
+ # The output weights are the same as the input embeddings, but there is
+ # an output-only bias for each token.
+ self.decoder = input_embeddings
+
+ def build(self, input_shape=None):
+ self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")
+
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "layer_norm", None) is not None:
+ with tf.name_scope(self.layer_norm.name):
+ self.layer_norm.build([None, None, self.config.hidden_size])
+
+ def get_output_embeddings(self):
+ return self.decoder
+
+ def set_output_embeddings(self, value):
+ self.decoder.weight = value
+ self.decoder.vocab_size = shape_list(value)[0]
+
+ def get_bias(self):
+ return {"bias": self.bias}
+
+ def set_bias(self, value):
+ self.bias = value["bias"]
+ self.config.vocab_size = shape_list(value["bias"])[0]
+
+ def call(self, hidden_states):
+ hidden_states = self.dense(hidden_states)
+ hidden_states = self.act(hidden_states)
+ hidden_states = self.layer_norm(hidden_states)
+
+ # project back to size of vocabulary with bias
+ seq_length = shape_list(tensor=hidden_states)[1]
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size])
+ hidden_states = tf.matmul(a=hidden_states, b=self.decoder.weight, transpose_b=True)
+ hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
+ hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
+
+ return hidden_states
+
+
+@add_start_docstrings("""MPNet Model with a `language modeling` head on top.""", MPNET_START_DOCSTRING)
+class TFMPNetForMaskedLM(TFMPNetPreTrainedModel, TFMaskedLanguageModelingLoss):
+ _keys_to_ignore_on_load_missing = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
+ self.lm_head = TFMPNetLMHead(config, self.mpnet.embeddings, name="lm_head")
+
+ def get_lm_head(self):
+ return self.lm_head
+
+ def get_prefix_bias_name(self):
+ warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
+ return self.name + "/" + self.lm_head.name
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMaskedLMOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
+ config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
+ loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
+ """
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+ prediction_scores = self.lm_head(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)
+
+ if not return_dict:
+ output = (prediction_scores,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMaskedLMOutput(
+ loss=loss,
+ logits=prediction_scores,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mpnet", None) is not None:
+ with tf.name_scope(self.mpnet.name):
+ self.mpnet.build(None)
+ if getattr(self, "lm_head", None) is not None:
+ with tf.name_scope(self.lm_head.name):
+ self.lm_head.build(None)
+
+
+class TFMPNetClassificationHead(keras.layers.Layer):
+ """Head for sentence-level classification tasks."""
+
+ def __init__(self, config, **kwargs):
+ super().__init__(**kwargs)
+ self.dense = keras.layers.Dense(
+ config.hidden_size,
+ kernel_initializer=get_initializer(config.initializer_range),
+ activation="tanh",
+ name="dense",
+ )
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.out_proj = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
+ )
+ self.config = config
+
+ def call(self, features, training=False):
+ x = features[:, 0, :] # take <s> token (equiv. to [CLS])
+ x = self.dropout(x, training=training)
+ x = self.dense(x)
+ x = self.dropout(x, training=training)
+ x = self.out_proj(x)
+ return x
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "dense", None) is not None:
+ with tf.name_scope(self.dense.name):
+ self.dense.build([None, None, self.config.hidden_size])
+ if getattr(self, "out_proj", None) is not None:
+ with tf.name_scope(self.out_proj.name):
+ self.out_proj.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ MPNet Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
+ output) e.g. for GLUE tasks.
+ """,
+ MPNET_START_DOCSTRING,
+)
+class TFMPNetForSequenceClassification(TFMPNetPreTrainedModel, TFSequenceClassificationLoss):
+ _keys_to_ignore_on_load_missing = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
+ self.classifier = TFMPNetClassificationHead(config, name="classifier")
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFSequenceClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+
+ sequence_output = outputs[0]
+ logits = self.classifier(sequence_output, training=training)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFSequenceClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mpnet", None) is not None:
+ with tf.name_scope(self.mpnet.name):
+ self.mpnet.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build(None)
+
+
+@add_start_docstrings(
+ """
+ MPNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
+ softmax) e.g. for RocStories/SWAG tasks.
+ """,
+ MPNET_START_DOCSTRING,
+)
+class TFMPNetForMultipleChoice(TFMPNetPreTrainedModel, TFMultipleChoiceLoss):
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFMultipleChoiceModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
+ where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
+ """
+ if input_ids is not None:
+ num_choices = shape_list(input_ids)[1]
+ seq_length = shape_list(input_ids)[2]
+ else:
+ num_choices = shape_list(inputs_embeds)[1]
+ seq_length = shape_list(inputs_embeds)[2]
+
+ flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
+ flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
+ flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
+ flat_inputs_embeds = (
+ tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
+ if inputs_embeds is not None
+ else None
+ )
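+ # Descriptive note: choices are flattened into the batch dimension so the encoder
+ # sees (batch_size * num_choices, seq_length); logits are reshaped back per choice below.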
+ outputs = self.mpnet(
+ flat_input_ids,
+ flat_attention_mask,
+ flat_position_ids,
+ head_mask,
+ flat_inputs_embeds,
+ output_attentions,
+ output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ pooled_output = outputs[1]
+ pooled_output = self.dropout(pooled_output, training=training)
+ logits = self.classifier(pooled_output)
+ reshaped_logits = tf.reshape(logits, (-1, num_choices))
+ loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)
+
+ if not return_dict:
+ output = (reshaped_logits,) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFMultipleChoiceModelOutput(
+ loss=loss,
+ logits=reshaped_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mpnet", None) is not None:
+ with tf.name_scope(self.mpnet.name):
+ self.mpnet.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ MPNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ MPNET_START_DOCSTRING,
+)
+class TFMPNetForTokenClassification(TFMPNetPreTrainedModel, TFTokenClassificationLoss):
+ _keys_to_ignore_on_load_missing = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+
+ self.num_labels = config.num_labels
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
+ self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
+ self.classifier = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFTokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: np.ndarray | tf.Tensor | None = None,
+ position_ids: np.ndarray | tf.Tensor | None = None,
+ head_mask: np.ndarray | tf.Tensor | None = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ labels: tf.Tensor | None = None,
+ training: bool = False,
+ ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
+ r"""
+ labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ outputs = self.mpnet(
+ input_ids=input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ sequence_output = self.dropout(sequence_output, training=training)
+ logits = self.classifier(sequence_output)
+
+ loss = None if labels is None else self.hf_compute_loss(labels, logits)
+
+ if not return_dict:
+ output = (logits,) + outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFTokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mpnet", None) is not None:
+ with tf.name_scope(self.mpnet.name):
+ self.mpnet.build(None)
+ if getattr(self, "classifier", None) is not None:
+ with tf.name_scope(self.classifier.name):
+ self.classifier.build([None, None, self.config.hidden_size])
+
+
+@add_start_docstrings(
+ """
+ MPNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
+ layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MPNET_START_DOCSTRING,
+)
+class TFMPNetForQuestionAnswering(TFMPNetPreTrainedModel, TFQuestionAnsweringLoss):
+ _keys_to_ignore_on_load_missing = [r"pooler"]
+
+ def __init__(self, config, *inputs, **kwargs):
+ super().__init__(config, *inputs, **kwargs)
+ self.num_labels = config.num_labels
+
+ self.mpnet = TFMPNetMainLayer(config, name="mpnet")
+ self.qa_outputs = keras.layers.Dense(
+ config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
+ )
+ self.config = config
+
+ @unpack_inputs
+ @add_start_docstrings_to_model_forward(MPNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TFQuestionAnsweringModelOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def call(
+ self,
+ input_ids: TFModelInputType | None = None,
+ attention_mask: Optional[Union[np.array, tf.Tensor]] = None,
+ position_ids: Optional[Union[np.array, tf.Tensor]] = None,
+ head_mask: Optional[Union[np.array, tf.Tensor]] = None,
+ inputs_embeds: tf.Tensor | None = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ start_positions: tf.Tensor | None = None,
+ end_positions: tf.Tensor | None = None,
+ training: bool = False,
+ **kwargs,
+ ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
+ r"""
+ start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ outputs = self.mpnet(
+ input_ids,
+ attention_mask=attention_mask,
+ position_ids=position_ids,
+ head_mask=head_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ training=training,
+ )
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = tf.split(logits, 2, axis=-1)
+ start_logits = tf.squeeze(start_logits, axis=-1)
+ end_logits = tf.squeeze(end_logits, axis=-1)
+ loss = None
+
+ if start_positions is not None and end_positions is not None:
+ labels = {"start_position": start_positions, "end_position": end_positions}
+ loss = self.hf_compute_loss(labels, (start_logits, end_logits))
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TFQuestionAnsweringModelOutput(
+ loss=loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
+
+ def build(self, input_shape=None):
+ if self.built:
+ return
+ self.built = True
+ if getattr(self, "mpnet", None) is not None:
+ with tf.name_scope(self.mpnet.name):
+ self.mpnet.build(None)
+ if getattr(self, "qa_outputs", None) is not None:
+ with tf.name_scope(self.qa_outputs.name):
+ self.qa_outputs.build([None, None, self.config.hidden_size])
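+
+
+ # Minimal usage sketch (illustrative, not part of the upstream module); assumes the
+ # "microsoft/mpnet-base" checkpoint is reachable on the Hub or in a local cache:
+ #
+ # from transformers import AutoTokenizer, TFMPNetModel
+ #
+ # tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
+ # model = TFMPNetModel.from_pretrained("microsoft/mpnet-base")
+ # inputs = tokenizer("MPNet combines masked and permuted language modeling.", return_tensors="tf")
+ # outputs = model(**inputs)
+ # print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)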
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet.py b/venv/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..003575300e85728be0b8f13c88ec076e714fba59
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet.py
@@ -0,0 +1,529 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Tokenization classes for MPNet."""
+
+import collections
+import os
+import unicodedata
+from typing import List, Optional, Tuple
+
+from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
+
+
+def load_vocab(vocab_file):
+ """Loads a vocabulary file into a dictionary."""
+ vocab = collections.OrderedDict()
+ with open(vocab_file, "r", encoding="utf-8") as reader:
+ tokens = reader.readlines()
+ for index, token in enumerate(tokens):
+ token = token.rstrip("\n")
+ vocab[token] = index
+ return vocab
+
+
+def whitespace_tokenize(text):
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
+ text = text.strip()
+ if not text:
+ return []
+ tokens = text.split()
+ return tokens
+
+
+class MPNetTokenizer(PreTrainedTokenizer):
+ """
+
+ This tokenizer inherits from [`BertTokenizer`] which contains most of the methods. Users should refer to the
+ superclass for more information regarding methods.
+
+ Args:
+ vocab_file (`str`):
+ Path to the vocabulary file.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ do_basic_tokenize (`bool`, *optional*, defaults to `True`):
+ Whether or not to do basic tokenization before WordPiece.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pre-training. Can be used as a sequence classifier token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file,
+ do_lower_case=True,
+ do_basic_tokenize=True,
+ never_split=None,
+ bos_token="",
+ eos_token="",
+ sep_token="",
+ cls_token="",
+ unk_token="[UNK]",
+ pad_token="",
+ mask_token="",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, special=True) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, special=True) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, special=True) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, special=True) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, special=True) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, special=True) if isinstance(pad_token, str) else pad_token
+
+ # Mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, special=True) if isinstance(mask_token, str) else mask_token
+
+ if not os.path.isfile(vocab_file):
+ raise ValueError(
+ f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
+ " model use `tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
+ )
+ self.vocab = load_vocab(vocab_file)
+ self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
+ self.do_basic_tokenize = do_basic_tokenize
+ if do_basic_tokenize:
+ self.basic_tokenizer = BasicTokenizer(
+ do_lower_case=do_lower_case,
+ never_split=never_split,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ )
+ self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
+
+ super().__init__(
+ do_lower_case=do_lower_case,
+ do_basic_tokenize=do_basic_tokenize,
+ never_split=never_split,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ unk_token=unk_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ @property
+ def do_lower_case(self):
+ return self.basic_tokenizer.do_lower_case
+
+ @property
+ def vocab_size(self):
+ return len(self.vocab)
+
+ def get_vocab(self):
+ # "" is part of the vocab, but was wrongfully added at a wrong index in the fast saved version
+ vocab = self.added_tokens_encoder.copy()
+ vocab.update(self.vocab)
+ return vocab
+
+ def _tokenize(self, text):
+ split_tokens = []
+ if self.do_basic_tokenize:
+ for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):
+ # If the token is part of the never_split set
+ if token in self.basic_tokenizer.never_split:
+ split_tokens.append(token)
+ else:
+ split_tokens += self.wordpiece_tokenizer.tokenize(token)
+ else:
+ split_tokens = self.wordpiece_tokenizer.tokenize(text)
+ return split_tokens
+
+ def _convert_token_to_id(self, token):
+ """Converts a token (str) in an id using the vocab."""
+ return self.vocab.get(token, self.vocab.get(self.unk_token))
+
+ def _convert_id_to_token(self, index):
+ """Converts an index (integer) in a token (str) using the vocab."""
+ return self.ids_to_tokens.get(index, self.unk_token)
+
+ def convert_tokens_to_string(self, tokens):
+ """Converts a sequence of tokens (string) in a single string."""
+ out_string = " ".join(tokens).replace(" ##", "").strip()
+ return out_string
+
+ def build_inputs_with_special_tokens(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
+ adding special tokens. An MPNet sequence has the following format:
+
+ - single sequence: `<s> X </s>`
+ - pair of sequences: `<s> A </s></s> B </s>`
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of IDs to which the special tokens will be added
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens.
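+
+ Example (an illustrative sketch; `tokenizer` is assumed to be an already-instantiated `MPNetTokenizer`
+ and the integer IDs are arbitrary placeholders):
+
+ ```python
+ >>> ids = tokenizer.build_inputs_with_special_tokens([5, 6, 7])
+ >>> # ids == [tokenizer.cls_token_id, 5, 6, 7, tokenizer.sep_token_id]
+ >>> pair_ids = tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8])
+ >>> # pair_ids == [cls] + [5, 6] + [sep, sep] + [7, 8] + [sep]
+ ```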
+ """
+ if token_ids_1 is None:
+ return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
+ cls = [self.cls_token_id]
+ sep = [self.sep_token_id]
+ return cls + token_ids_0 + sep + sep + token_ids_1 + sep
+
+ def get_special_tokens_mask(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
+ ) -> List[int]:
+ """
+ Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
+ special tokens using the tokenizer `prepare_for_model` methods.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
+ Set to True if the token list is already formatted with special tokens for the model
+
+ Returns:
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
+ """
+ if already_has_special_tokens:
+ return super().get_special_tokens_mask(
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
+ )
+
+ if token_ids_1 is None:
+ return [1] + ([0] * len(token_ids_0)) + [1]
+ return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ index = 0
+ if os.path.isdir(save_directory):
+ vocab_file = os.path.join(
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
+ )
+ else:
+ vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
+ with open(vocab_file, "w", encoding="utf-8") as writer:
+ for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
+ if index != token_index:
+ logger.warning(
+ f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
+ " Please check that the vocabulary is not corrupted!"
+ )
+ index = token_index
+ writer.write(token + "\n")
+ index += 1
+ return (vocab_file,)
+
+
+# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
+class BasicTokenizer(object):
+ """
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
+
+ Args:
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ never_split (`Iterable`, *optional*):
+ Collection of tokens which will never be split during tokenization. Only has an effect when
+ `do_basic_tokenize=True`
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters.
+
+ This should likely be deactivated for Japanese (see this
+ [issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
+ the full context of the words, such as contractions.
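+
+ Example (an illustrative sketch of the default behaviour):
+
+ ```python
+ >>> tokenizer = BasicTokenizer(do_lower_case=True)
+ >>> tokenizer.tokenize("Hello, World!")
+ ['hello', ',', 'world', '!']
+ ```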
+ """
+
+ def __init__(
+ self,
+ do_lower_case=True,
+ never_split=None,
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ do_split_on_punc=True,
+ ):
+ if never_split is None:
+ never_split = []
+ self.do_lower_case = do_lower_case
+ self.never_split = set(never_split)
+ self.tokenize_chinese_chars = tokenize_chinese_chars
+ self.strip_accents = strip_accents
+ self.do_split_on_punc = do_split_on_punc
+
+ def tokenize(self, text, never_split=None):
+ """
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
+
+ Args:
+ never_split (`List[str]`, *optional*):
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
+ [`PreTrainedTokenizer.tokenize`]). List of tokens not to split.
+ """
+ # union() returns a new set by concatenating the two sets.
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
+ text = self._clean_text(text)
+
+ # This was added on November 1st, 2018 for the multilingual and Chinese
+ # models. This is also applied to the English models now, but it doesn't
+ # matter since the English models were not trained on any Chinese data
+ # and generally don't have any Chinese data in them (there are Chinese
+ # characters in the vocabulary because Wikipedia does have some Chinese
+ # words in the English Wikipedia).
+ if self.tokenize_chinese_chars:
+ text = self._tokenize_chinese_chars(text)
+ # prevents treating the same character with different unicode codepoints as different characters
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
+ split_tokens = []
+ for token in orig_tokens:
+ if token not in never_split:
+ if self.do_lower_case:
+ token = token.lower()
+ if self.strip_accents is not False:
+ token = self._run_strip_accents(token)
+ elif self.strip_accents:
+ token = self._run_strip_accents(token)
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
+
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
+ return output_tokens
+
+ def _run_strip_accents(self, text):
+ """Strips accents from a piece of text."""
+ text = unicodedata.normalize("NFD", text)
+ output = []
+ for char in text:
+ cat = unicodedata.category(char)
+ if cat == "Mn":
+ continue
+ output.append(char)
+ return "".join(output)
+
+ def _run_split_on_punc(self, text, never_split=None):
+ """Splits punctuation on a piece of text."""
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
+ return [text]
+ chars = list(text)
+ i = 0
+ start_new_word = True
+ output = []
+ while i < len(chars):
+ char = chars[i]
+ if _is_punctuation(char):
+ output.append([char])
+ start_new_word = True
+ else:
+ if start_new_word:
+ output.append([])
+ start_new_word = False
+ output[-1].append(char)
+ i += 1
+
+ return ["".join(x) for x in output]
+
+ def _tokenize_chinese_chars(self, text):
+ """Adds whitespace around any CJK character."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if self._is_chinese_char(cp):
+ output.append(" ")
+ output.append(char)
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+ def _is_chinese_char(self, cp):
+ """Checks whether CP is the codepoint of a CJK character."""
+ # This defines a "chinese character" as anything in the CJK Unicode block:
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
+ #
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
+ # despite its name. The modern Korean Hangul alphabet is a different block,
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
+ # space-separated words, so they are not treated specially and handled
+ # like all of the other languages.
+ if (
+ (cp >= 0x4E00 and cp <= 0x9FFF)
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
+ or (cp >= 0xF900 and cp <= 0xFAFF)
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
+ ): #
+ return True
+
+ return False
+
+ def _clean_text(self, text):
+ """Performs invalid character removal and whitespace cleanup on text."""
+ output = []
+ for char in text:
+ cp = ord(char)
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
+ continue
+ if _is_whitespace(char):
+ output.append(" ")
+ else:
+ output.append(char)
+ return "".join(output)
+
+
+# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer
+class WordpieceTokenizer(object):
+ """Runs WordPiece tokenization."""
+
+ def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
+ self.vocab = vocab
+ self.unk_token = unk_token
+ self.max_input_chars_per_word = max_input_chars_per_word
+
+ def tokenize(self, text):
+ """
+ Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform
+ tokenization using the given vocabulary.
+
+ For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`.
+
+ Args:
+ text: A single token or whitespace separated tokens. This should have
+ already been passed through *BasicTokenizer*.
+
+ Returns:
+ A list of wordpiece tokens.
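+
+ Example (an illustrative sketch with a toy vocabulary; real vocabularies are loaded via `load_vocab`):
+
+ ```python
+ >>> vocab = {"un": 0, "##aff": 1, "##able": 2, "[UNK]": 3}
+ >>> wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
+ >>> wordpiece.tokenize("unaffable")
+ ['un', '##aff', '##able']
+ >>> wordpiece.tokenize("xyz")  # no piece matches, so the unknown token is emitted
+ ['[UNK]']
+ ```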
+ """
+
+ output_tokens = []
+ for token in whitespace_tokenize(text):
+ chars = list(token)
+ if len(chars) > self.max_input_chars_per_word:
+ output_tokens.append(self.unk_token)
+ continue
+
+ is_bad = False
+ start = 0
+ sub_tokens = []
+ while start < len(chars):
+ end = len(chars)
+ cur_substr = None
+ while start < end:
+ substr = "".join(chars[start:end])
+ if start > 0:
+ substr = "##" + substr
+ if substr in self.vocab:
+ cur_substr = substr
+ break
+ end -= 1
+ if cur_substr is None:
+ is_bad = True
+ break
+ sub_tokens.append(cur_substr)
+ start = end
+
+ if is_bad:
+ output_tokens.append(self.unk_token)
+ else:
+ output_tokens.extend(sub_tokens)
+ return output_tokens
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet_fast.py b/venv/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet_fast.py
new file mode 100644
index 0000000000000000000000000000000000000000..433c3028fc20933bf739eec651f514434b554404
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpnet/tokenization_mpnet_fast.py
@@ -0,0 +1,206 @@
+# coding=utf-8
+# Copyright 2018 The HuggingFace Inc. team, Microsoft Corporation.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Fast Tokenization classes for MPNet."""
+
+import json
+from typing import List, Optional, Tuple
+
+from tokenizers import normalizers
+
+from ...tokenization_utils import AddedToken
+from ...tokenization_utils_fast import PreTrainedTokenizerFast
+from ...utils import logging
+from .tokenization_mpnet import MPNetTokenizer
+
+
+logger = logging.get_logger(__name__)
+
+VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
+
+
+class MPNetTokenizerFast(PreTrainedTokenizerFast):
+ r"""
+ Construct a "fast" MPNet tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
+
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
+ refer to this superclass for more information regarding those methods.
+
+ Args:
+ vocab_file (`str`):
+ File containing the vocabulary.
+ do_lower_case (`bool`, *optional*, defaults to `True`):
+ Whether or not to lowercase the input when tokenizing.
+ bos_token (`str`, *optional*, defaults to `"<s>"`):
+ The beginning of sequence token that was used during pretraining. Can be used as a sequence classifier token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the beginning of
+ sequence. The token used is the `cls_token`.
+
+ </Tip>
+
+ eos_token (`str`, *optional*, defaults to `"</s>"`):
+ The end of sequence token.
+
+ <Tip>
+
+ When building a sequence using special tokens, this is not the token that is used for the end of sequence.
+ The token used is the `sep_token`.
+
+ </Tip>
+
+ sep_token (`str`, *optional*, defaults to `"</s>"`):
+ The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
+ sequence classification or for a text and a question for question answering. It is also used as the last
+ token of a sequence built with special tokens.
+ cls_token (`str`, *optional*, defaults to `"<s>"`):
+ The classifier token which is used when doing sequence classification (classification of the whole sequence
+ instead of per-token classification). It is the first token of the sequence when built with special tokens.
+ unk_token (`str`, *optional*, defaults to `"[UNK]"`):
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
+ token instead.
+ pad_token (`str`, *optional*, defaults to `"<pad>"`):
+ The token used for padding, for example when batching sequences of different lengths.
+ mask_token (`str`, *optional*, defaults to `"<mask>"`):
+ The token used for masking values. This is the token used when training this model with masked language
+ modeling. This is the token which the model will try to predict.
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
+ Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
+ issue](https://github.com/huggingface/transformers/issues/328)).
+ strip_accents (`bool`, *optional*):
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
+ value for `lowercase` (as in the original BERT).
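+
+ Example (a minimal sketch; assumes the `microsoft/mpnet-base` checkpoint is reachable):
+
+ ```python
+ >>> from transformers import MPNetTokenizerFast
+
+ >>> tokenizer = MPNetTokenizerFast.from_pretrained("microsoft/mpnet-base")
+ >>> encoding = tokenizer("Hello world!", "How are you?")
+ >>> list(encoding.keys())  # expected to match `model_input_names`: input_ids and attention_mask
+ ```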
+ """
+
+ vocab_files_names = VOCAB_FILES_NAMES
+ slow_tokenizer_class = MPNetTokenizer
+ model_input_names = ["input_ids", "attention_mask"]
+
+ def __init__(
+ self,
+ vocab_file=None,
+ tokenizer_file=None,
+ do_lower_case=True,
+ bos_token="<s>",
+ eos_token="</s>",
+ sep_token="</s>",
+ cls_token="<s>",
+ unk_token="[UNK]",
+ pad_token="<pad>",
+ mask_token="<mask>",
+ tokenize_chinese_chars=True,
+ strip_accents=None,
+ **kwargs,
+ ):
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
+ sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
+ cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
+ pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
+
+ # The mask token behaves like a normal word, i.e. it includes the space before it
+ mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
+
+ super().__init__(
+ vocab_file,
+ tokenizer_file=tokenizer_file,
+ do_lower_case=do_lower_case,
+ bos_token=bos_token,
+ eos_token=eos_token,
+ sep_token=sep_token,
+ cls_token=cls_token,
+ unk_token=unk_token,
+ pad_token=pad_token,
+ mask_token=mask_token,
+ tokenize_chinese_chars=tokenize_chinese_chars,
+ strip_accents=strip_accents,
+ **kwargs,
+ )
+
+ pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
+ if (
+ pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
+ or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
+ ):
+ pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
+ pre_tok_state["lowercase"] = do_lower_case
+ pre_tok_state["strip_accents"] = strip_accents
+ self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)
+
+ self.do_lower_case = do_lower_case
+
+ @property
+ def mask_token(self) -> str:
+ """
+ `str`: Mask token, to use when training a model with masked-language modeling. Log an error if used while not
+ having been set.
+
+ MPNet tokenizer has a special mask token to be usable in the fill-mask pipeline. The mask token will greedily
+ comprise the space before the *<mask>*.
+ """
+ if self._mask_token is None:
+ if self.verbose:
+ logger.error("Using mask_token, but it is not set yet.")
+ return None
+ return str(self._mask_token)
+
+ @mask_token.setter
+ def mask_token(self, value):
+ """
+ Overriding the default behavior of the mask token to have it eat the space before it.
+
+ This is needed to preserve backward compatibility with all the previously used models based on MPNet.
+ """
+ # The mask token behaves like a normal word, i.e. it includes the space before it
+ # So we set lstrip to True
+ value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
+ self._mask_token = value
+
+ def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
+ output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
+ if token_ids_1 is None:
+ return output
+
+ return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
+
+ def create_token_type_ids_from_sequences(
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
+ ) -> List[int]:
+ """
+ Creates a mask from the two sequences passed to be used in a sequence-pair classification task. MPNet does not
+ make use of token type ids, therefore a list of zeros is returned.
+
+ Args:
+ token_ids_0 (`List[int]`):
+ List of ids.
+ token_ids_1 (`List[int]`, *optional*):
+ Optional second list of IDs for sequence pairs.
+
+ Returns:
+ `List[int]`: List of zeros.
+ """
+ sep = [self.sep_token_id]
+ cls = [self.cls_token_id]
+
+ if token_ids_1 is None:
+ return len(cls + token_ids_0 + sep) * [0]
+ return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
+
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
+ return tuple(files)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpt/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mpt/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d24a5fad7b9d2c9cae6de18871f22f4e52437fb1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpt/__init__.py
@@ -0,0 +1,62 @@
+# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
+
+
+_import_structure = {
+ "configuration_mpt": ["MPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MptConfig", "MptOnnxConfig"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_mpt"] = [
+ "MPT_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "MptForCausalLM",
+ "MptModel",
+ "MptPreTrainedModel",
+ "MptForSequenceClassification",
+ "MptForTokenClassification",
+ "MptForQuestionAnswering",
+ ]
+
+if TYPE_CHECKING:
+ from .configuration_mpt import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP, MptConfig, MptOnnxConfig
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_mpt import (
+ MPT_PRETRAINED_MODEL_ARCHIVE_LIST,
+ MptForCausalLM,
+ MptForQuestionAnswering,
+ MptForSequenceClassification,
+ MptForTokenClassification,
+ MptModel,
+ MptPreTrainedModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ad21c4ebaed808de200e557a502947595fe111ef
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/configuration_mpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/configuration_mpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..48a58ce5a6e9905710eb33497b8dab65bc27eaf3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/configuration_mpt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/modeling_mpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/modeling_mpt.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d4e3abb7f9e3664b31ed29fff127617e8653e02
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mpt/__pycache__/modeling_mpt.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpt/configuration_mpt.py b/venv/lib/python3.10/site-packages/transformers/models/mpt/configuration_mpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c1cb4d783b307bd47d0c8624e390d478db79aa2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpt/configuration_mpt.py
@@ -0,0 +1,246 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Mpt configuration"""
+from typing import TYPE_CHECKING, Optional, Union
+
+
+if TYPE_CHECKING:
+ pass
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import MPT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class MptAttentionConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`MptAttention`] class. It is used to instantiate
+ attention layers according to the specified arguments, defining the layers architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the MPT
+ [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b) architecture. Most of the arguments are kept for backward
+ compatibility with previous MPT models that are hosted on the Hub (previously with `trust_remote_code=True`).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ attn_type (`str`, *optional*, defaults to `"multihead_attention"`):
+ Type of attention to use. Options: `"multihead_attention"`, `"multiquery_attention"`.
+ attn_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability for the attention layers.
+ attn_impl (`str`, *optional*, defaults to `"torch"`):
+ The attention implementation to use. One of `"torch"`, `"flash"`, or `"triton"`.
+ clip_qkv (`float`, *optional*):
+ If not `None`, clip the queries, keys, and values in the attention layer to this value.
+ softmax_scale (`float`, *optional*, defaults to `None`):
+ If not `None`, scale the softmax in the attention layer by this value. If `None`, will default to
+ `1/sqrt(hidden_size)`.
+ prefix_lm (`bool`, *optional*, defaults to `False`):
+ Whether the model should operate as a Prefix LM. This requires passing an extra `prefix_mask` argument
+ which indicates which tokens belong to the prefix. Tokens in the prefix can attend to one another
+ bi-directionally. Tokens outside the prefix use causal attention.
+ qk_ln (`bool`, *optional*, defaults to `False`):
+ Whether to apply layer normalization to the queries and keys in the attention layer.
+ attn_uses_sequence_id (`bool`, *optional*, defaults to `False`):
+ Whether to restrict attention to tokens that have the same token_type_ids. When the model is in `train`
+ mode, this requires passing an extra *token_type_ids* argument which indicates which sub-sequence each
+ token belongs to. Defaults to `False` meaning any provided *token_type_ids* will be ignored.
+ alibi (`bool`, *optional*, defaults to `True`):
+ Whether or not to use the alibi bias instead of positional embedding.
+ alibi_bias_max (`int`, *optional*, defaults to 8):
+ The maximum value of the alibi bias.
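+
+ Example (an illustrative sketch; `MptAttentionConfig` is normally built for you from the `attn_config`
+ entry of [`MptConfig`]):
+
+ ```python
+ >>> from transformers import MptConfig
+ >>> from transformers.models.mpt.configuration_mpt import MptAttentionConfig
+
+ >>> # Build the attention sub-config explicitly, then hand it to the main config
+ >>> attn_config = MptAttentionConfig(attn_pdrop=0.1, alibi=True)
+ >>> config = MptConfig(attn_config=attn_config)
+ ```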
+ """
+
+ def __init__(
+ self,
+ attn_type="multihead_attention",
+ attn_pdrop=0,
+ attn_impl="torch",
+ clip_qkv=None,
+ softmax_scale=None,
+ prefix_lm=False,
+ qk_ln=False,
+ attn_uses_sequence_id=False,
+ alibi=True,
+ alibi_bias_max=8,
+ **kwargs,
+ ):
+ super().__init__()
+ self.attn_type = attn_type
+ self.attn_pdrop = attn_pdrop
+ self.attn_impl = attn_impl
+ self.clip_qkv = clip_qkv
+ self.softmax_scale = softmax_scale
+ self.prefix_lm = prefix_lm
+ self.attn_uses_sequence_id = attn_uses_sequence_id
+ self.alibi = alibi
+ self.qk_ln = qk_ln
+ self.alibi_bias_max = alibi_bias_max
+
+ if attn_type not in ["multihead_attention", "multiquery_attention"]:
+ raise ValueError(
+ f"`attn_type` has to be either `multihead_attention` or `multiquery_attention`. Received: {attn_type}"
+ )
+
+ @classmethod
+ def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
+ cls._set_token_in_kwargs(kwargs)
+
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
+
+ if config_dict.get("model_type") == "mpt":
+ config_dict = config_dict["attn_config"]
+
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
+ logger.warning(
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
+ )
+
+ return cls.from_dict(config_dict, **kwargs)
+
+
+class MptConfig(PretrainedConfig):
+ """
+ This is the configuration class to store the configuration of a [`MptModel`]. It is used to instantiate a Mpt model
+ according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+ defaults will yield a similar configuration to the Mpt-7b architecture
+ [mosaicml/mpt-7b](https://huggingface.co/mosaicml/mpt-7b).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+
+ Args:
+ d_model (`int`, *optional*, defaults to 2048):
+ Dimensionality of the embeddings and hidden states.
+ n_heads (`int`, *optional*, defaults to 16):
+ Number of attention heads for each attention layer in the Transformer encoder.
+ n_layers (`int`, *optional*, defaults to 24):
+ Number of hidden layers in the Transformer encoder.
+ expansion_ratio (`int`, *optional*, defaults to 4):
+ The ratio of the up/down scale in the MLP.
+ max_seq_len (`int`, *optional*, defaults to 2048):
+ The maximum sequence length of the model.
+ vocab_size (`int`, *optional*, defaults to 50368):
+ Vocabulary size of the Mpt model. Defines the maximum number of different tokens that can be represented by
+ the `input_ids` passed when calling [`MptModel`]. Check [this
+ discussion](https://huggingface.co/bigscience/mpt/discussions/120#633d28389addb8530b406c2a) on how the
+ `vocab_size` has been defined.
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability applied to the attention output before combining with residual.
+ layer_norm_epsilon (`float`, *optional*, defaults to 1e-05):
+ The epsilon to use in the layer normalization layers.
+ emb_pdrop (`float`, *optional*, defaults to 0.0):
+ The dropout probability for the embedding layer.
+ learned_pos_emb (`bool`, *optional*, defaults to `True`):
+ Whether to use learned positional embeddings.
+ attn_config (`dict`, *optional*):
+ A dictionary used to configure the model's attention module.
+ init_device (`str`, *optional*, defaults to `"cpu"`):
+ The device to use for parameter initialization. Defined for backward compatibility
+ logit_scale (`float`, *optional*):
+ If not None, scale the logits by this value.
+ no_bias (`bool`, *optional*, defaults to `True`):
+ Whether to disable the bias in all linear layers.
+ verbose (`int`, *optional*, defaults to 0):
+ The verbosity level to use for logging. Used in the previous versions of MPT models for logging. This
+ argument is deprecated.
+ embedding_fraction (`float`, *optional*, defaults to 1.0):
+ The fraction to scale the gradients of the embedding layer by.
+ norm_type (`str`, *optional*, defaults to `"low_precision_layernorm"`):
+ Type of layer norm to use. All MPT models use the same layer norm implementation. Defined for backward
+ compatibility.
+ use_cache (`bool`, *optional*, defaults to `False`):
+ Whether or not the model should return the last key/values attentions (not used by all models).
+ initializer_range (`float`, *optional*, defaults to 0.02):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+
+ Example:
+
+ ```python
+ >>> from transformers import MptConfig, MptModel
+
+ >>> # Initializing a Mpt configuration
+ >>> configuration = MptConfig()
+
+ >>> # Initializing a model (with random weights) from the configuration
+ >>> model = MptModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "mpt"
+ attribute_map = {
+ "num_attention_heads": "n_heads",
+ "hidden_size": "d_model",
+ "num_hidden_layers": "n_layers",
+ }
+
+ def __init__(
+ self,
+ d_model: int = 2048,
+ n_heads: int = 16,
+ n_layers: int = 24,
+ expansion_ratio: int = 4,
+ max_seq_len: int = 2048,
+ vocab_size: int = 50368,
+ resid_pdrop: float = 0.0,
+ layer_norm_epsilon: float = 1e-5,
+ emb_pdrop: float = 0.0,
+ learned_pos_emb: bool = True,
+ attn_config: MptAttentionConfig = None,
+ init_device: str = "cpu",
+ logit_scale: Optional[Union[float, str]] = None,
+ no_bias: bool = True,
+ verbose: int = 0,
+ embedding_fraction: float = 1.0,
+ norm_type: str = "low_precision_layernorm",
+ use_cache: bool = False,
+ initializer_range=0.02,
+ **kwargs,
+ ):
+ if attn_config is None:
+ self.attn_config = MptAttentionConfig()
+ elif isinstance(attn_config, dict):
+ self.attn_config = MptAttentionConfig(**attn_config)
+ else:
+ self.attn_config = attn_config
+ self.d_model = d_model
+ self.n_heads = n_heads
+ self.n_layers = n_layers
+ self.expansion_ratio = expansion_ratio
+ self.max_seq_len = max_seq_len
+ self.vocab_size = vocab_size
+ self.resid_pdrop = resid_pdrop
+ self.emb_pdrop = emb_pdrop
+ self.learned_pos_emb = learned_pos_emb
+ self.init_device = init_device
+ self.logit_scale = logit_scale
+ self.no_bias = no_bias
+ self.verbose = verbose
+ self.embedding_fraction = embedding_fraction
+ self.norm_type = norm_type
+ self.layer_norm_epsilon = layer_norm_epsilon
+ self.use_cache = use_cache
+ self.initializer_range = initializer_range
+ super().__init__(**kwargs)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/mpt/modeling_mpt.py b/venv/lib/python3.10/site-packages/transformers/models/mpt/modeling_mpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..864e9c09ca3cb72fb4976d58d324c246b7b27034
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/mpt/modeling_mpt.py
@@ -0,0 +1,942 @@
+# coding=utf-8
+# Copyright 2023 HuggingFace Inc. team and MosaicML NLP team.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""PyTorch MPT model."""
+
+import math
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
+from torch.nn import functional as F
+
+from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
+from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+from ...modeling_outputs import (
+ BaseModelOutputWithPastAndCrossAttentions,
+ CausalLMOutputWithCrossAttentions,
+ QuestionAnsweringModelOutput,
+ SequenceClassifierOutputWithPast,
+ TokenClassifierOutput,
+)
+from ...modeling_utils import PreTrainedModel
+from ...utils import logging
+from .configuration_mpt import MptConfig
+
+
+logger = logging.get_logger(__name__)
+
+_CHECKPOINT_FOR_DOC = "mosaicml/mpt-7b"
+_CONFIG_FOR_DOC = "MptConfig"
+
+
+from ..deprecated._archive_maps import MPT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+def build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max=8, device=None):
+ r"""
+ Link to paper: https://arxiv.org/abs/2108.12409 - Alibi tensor is not causal as the original paper mentions, it
+ relies on a translation invariance of softmax for quick implementation. This implementation has been copied from
+ the alibi implementation of MPT source code that led to slightly different results than the Bloom alibi:
+ https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292
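+
+ Example (an illustrative sketch; the shape follows from the reshaping performed below):
+
+ ```python
+ >>> from transformers.models.mpt.modeling_mpt import build_mpt_alibi_tensor
+
+ >>> alibi = build_mpt_alibi_tensor(num_heads=4, sequence_length=8)
+ >>> alibi.shape  # (num_heads, 1, sequence_length)
+ torch.Size([4, 1, 8])
+ ```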
+ """
+ alibi = torch.arange(1 - sequence_length, 1, dtype=torch.int32, device=device).view(1, 1, 1, sequence_length)
+ num_heads_power_of_2 = 2 ** math.ceil(math.log2(num_heads))
+
+ base = torch.arange(1, num_heads_power_of_2 + 1, dtype=torch.int64, device=device).float()
+ base = base * (alibi_bias_max / num_heads_power_of_2)
+
+ slopes = 1.0 / torch.pow(2, base)
+ slopes = slopes.view(1, num_heads_power_of_2, 1, 1)
+
+ if num_heads_power_of_2 != num_heads:
+ slopes = torch.concat([slopes[:, 1::2, ...], slopes[:, ::2, ...]], dim=1)[:, :num_heads, ...]
+
+ alibi = alibi * slopes
+ return alibi.squeeze(0)
+
+
+class MptAttention(nn.Module):
+ """Multi-head self attention.
+ Using the torch or triton attention implementation enables the user to also use an additive bias.
+ """
+
+ def __init__(self, config: MptConfig):
+ super().__init__()
+ self.hidden_size = config.hidden_size
+ self.n_heads = config.n_heads
+ self.max_seq_length = config.max_seq_len
+ self.head_dim = self.hidden_size // self.n_heads
+ self.softmax_scale = config.attn_config.softmax_scale
+ if self.softmax_scale is None:
+ self.softmax_scale = 1 / math.sqrt(self.hidden_size / self.n_heads)
+
+ self.attn_dropout_p = config.attn_config.attn_pdrop
+ self.Wqkv = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=False)
+ self.out_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_bias: torch.Tensor,
+ past_key_value: Optional[Tuple[torch.Tensor]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ ):
+ batch_size, seq_length = hidden_states.shape[:2]
+
+ mixed_qkv = self.Wqkv(hidden_states)
+ query_states, key_states, value_states = mixed_qkv.chunk(3, dim=2)
+ query_states = query_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
+ key_states = key_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
+ value_states = value_states.reshape(batch_size, seq_length, self.n_heads, self.head_dim).transpose(1, 2)
+
+ if past_key_value is not None:
+ if len(past_key_value) != 0:
+ key_states = torch.cat([past_key_value[0], key_states], dim=2)
+ value_states = torch.cat([past_key_value[1], value_states], dim=2)
+ past_key_value = (key_states, value_states)
+ else:
+ past_key_value = (key_states, value_states)
+
+ attention_scores = torch.matmul(query_states, key_states.transpose(-1, -2)) * self.softmax_scale
+
+ query_length = seq_length if past_key_value is None else seq_length + past_key_value[0].shape[2]
+
+ if position_bias is not None:
+ if len(position_bias.shape) != 3:
+ raise ValueError(f"Expecting position_bias shape to be 3 dimensions, got {len(position_bias.shape)}")
+ key_length = key_states.shape[-2]
+
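+ # Crop the position bias so its trailing entries line up with the current query/key positions
+ # (at most the last `query_length` rows and `key_length` columns are kept)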
+ position_bias_query_index = max(0, position_bias.size(1) - query_length)
+ position_bias_key_index = max(0, position_bias.size(2) - key_length)
+
+ position_bias = position_bias[:, position_bias_query_index:, position_bias_key_index:]
+
+ attention_scores = attention_scores + position_bias
+
+ if attention_mask is not None:
+ attention_scores = attention_scores.masked_fill(attention_mask, torch.finfo(query_states.dtype).min)
+
+ # (batch_size, n_heads, seq_length, key_length)
+ attn_weights = nn.functional.softmax(attention_scores.float(), dim=-1).to(value_states.dtype)
+ attn_weights = nn.functional.dropout(attn_weights, p=self.attn_dropout_p, training=self.training)
+
+ context_states = torch.matmul(attn_weights, value_states)
+ context_states = context_states.permute(0, 2, 1, 3).contiguous().view(batch_size, seq_length, -1)
+ attn_output = self.out_proj(context_states)
+
+ return attn_output, attn_weights, past_key_value
+
+
+class MptMLP(nn.Module):
+ def __init__(self, config: MptConfig):
+ super().__init__()
+ hidden_size = config.hidden_size
+
+ self.up_proj = nn.Linear(hidden_size, 4 * hidden_size, bias=False)
+ self.act = nn.GELU(approximate="none")
+ self.down_proj = nn.Linear(4 * hidden_size, hidden_size, bias=False)
+ self.hidden_dropout = config.attn_config.attn_pdrop
+
+ def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
+ hidden_states = self.act(self.up_proj(hidden_states))
+
+ intermediate_output = self.down_proj(hidden_states)
+
+ output = F.dropout(intermediate_output, p=self.hidden_dropout, training=self.training)
+ output = output + residual
+
+ return output
+
+
+class MptBlock(nn.Module):
+ def __init__(self, config: MptConfig):
+ super().__init__()
+ hidden_size = config.hidden_size
+
+ self.norm_1 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+ # backward compatibility with weights on the Hub
+ self.norm_1.bias = None
+
+ self.num_heads = config.n_heads
+ self.attn = MptAttention(config)
+
+ self.norm_2 = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
+ # backward compatibility with weights on the Hub
+ self.norm_2.bias = None
+
+ self.ffn = MptMLP(config)
+
+ self.dropout_rate = config.attn_config.attn_pdrop
+ self.resid_attn_dropout = nn.Dropout(self.dropout_rate)
+
+ def forward(
+ self,
+ hidden_states: torch.Tensor,
+ position_bias: torch.Tensor,
+ attention_mask: torch.Tensor,
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
+ use_cache: bool = False,
+ output_attentions: bool = False,
+ ):
+ # hidden_states: [batch_size, seq_length, hidden_size]
+ # Layer norm at the beginning of the transformer layer.
+ layernorm_output = self.norm_1(hidden_states)
+
+ residual = hidden_states
+
+ # Self attention.
+ attn_outputs, attn_weights, past_key_value = self.attn(
+ layernorm_output,
+ position_bias=position_bias,
+ attention_mask=attention_mask,
+ past_key_value=layer_past,
+ )
+
+ hidden_states = self.resid_attn_dropout(attn_outputs) + residual
+
+ layernorm_output = self.norm_2(hidden_states)
+
+ # Get residual
+ residual = hidden_states
+
+ # MLP.
+ output = self.ffn(layernorm_output, residual)
+ outputs = (output,)
+
+ if use_cache:
+ outputs += (past_key_value,)
+
+ if output_attentions:
+ outputs += (attn_weights,)
+
+ return outputs # hidden_states, present, attentions
+
+
+class MptPreTrainedModel(PreTrainedModel):
+ config_class = MptConfig
+ base_model_prefix = "transformer"
+ supports_gradient_checkpointing = True
+ _no_split_modules = ["MptBlock"]
+ _keys_to_ignore_on_load_missing = [r"lm_head.*."]
+
+ def __init__(self, *inputs, **kwargs):
+ super().__init__(*inputs, **kwargs)
+
+ def _init_weights(self, module: nn.Module):
+ """Initialize the weights."""
+ if isinstance(module, nn.Linear):
+ # Slightly different from the TF version which uses truncated_normal for initialization
+ # cf https://github.com/pytorch/pytorch/pull/5617
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+ elif isinstance(module, nn.Embedding):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.padding_idx is not None:
+ module.weight.data[module.padding_idx].zero_()
+ elif isinstance(module, LayerNorm):
+ if module.bias is not None:
+ module.bias.data.zero_()
+ module.weight.data.fill_(1.0)
+
+ @staticmethod
+ def _convert_to_mpt_cache(
+ past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]],
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
+ """
+ Converts the cache to the format expected by Mpt, i.e. to tuple(tuple([batch_size * num_heads, ...]))
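+
+ Example (an illustrative sketch with dummy tensors; this is a private helper, shown only to make the
+ reshaping concrete):
+
+ ```python
+ >>> import torch
+ >>> from transformers.models.mpt.modeling_mpt import MptPreTrainedModel
+
+ >>> past = ((torch.zeros(2, 4, 64, 5), torch.zeros(2, 4, 5, 64)),)  # (batch=2, heads=4, head_dim=64, seq=5)
+ >>> key, value = MptPreTrainedModel._convert_to_mpt_cache(past)[0]
+ >>> key.shape, value.shape
+ (torch.Size([8, 64, 5]), torch.Size([8, 5, 64]))
+ ```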
+ """
+ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
+ batch_size_times_num_heads = batch_size * num_heads
+ # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
+ # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
+ return tuple(
+ (
+ layer_past[0].reshape(batch_size_times_num_heads, head_dim, seq_length),
+ layer_past[1].reshape(batch_size_times_num_heads, seq_length, head_dim),
+ )
+ for layer_past in past_key_value
+ )
+
+
+MPT_START_DOCSTRING = r"""
+
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`MptConfig`]): Model configuration class with all the parameters of the model.
+ Initializing with a config file does not load the weights associated with the model, only the
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+MPT_INPUTS_DOCSTRING = r"""
+ Args:
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
+
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
+ `input_ids`.
+
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
+ [`PreTrainedTokenizer.__call__`] for details.
+
+ [What are input IDs?](../glossary#input-ids)
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
+
+ Each element of `past_key_values` is a tuple (past_key, past_value):
+ - past_key: [batch_size * num_heads, head_dim, kv_length]
+ - past_value: [batch_size * num_heads, kv_length, head_dim]
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**,
+ - 0 for tokens that are **masked**.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
+ model's internal embedding lookup matrix.
+
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
+ `past_key_values`).
+ use_cache (`bool`, *optional*):
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
+ `past_key_values`).
+ output_attentions (`bool`, *optional*):
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+ tensors for more detail.
+ output_hidden_states (`bool`, *optional*):
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
+ more detail.
+ return_dict (`bool`, *optional*):
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ "The bare Mpt Model transformer outputting raw hidden-states without any specific head on top.",
+ MPT_START_DOCSTRING,
+)
+class MptModel(MptPreTrainedModel):
+ def __init__(self, config: MptConfig):
+ super().__init__(config)
+
+ self.hidden_size = config.hidden_size
+ self.num_heads = config.n_heads
+
+ # Embedding + LN Embedding
+ self.wte = nn.Embedding(config.vocab_size, self.hidden_size)
+
+ # Transformer blocks
+ self.blocks = nn.ModuleList([MptBlock(config) for _ in range(config.n_layers)])
+
+ # Final Layer Norm
+ self.norm_f = LayerNorm(self.hidden_size, eps=config.layer_norm_epsilon)
+ # backward compatibility with weights on the Hub
+ self.norm_f.bias = None
+
+ self.gradient_checkpointing = False
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_input_embeddings(self):
+ return self.wte
+
+ def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None):
+ return build_mpt_alibi_tensor(num_heads, sequence_length, alibi_bias_max, device)
+
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
+ self.wte = new_embeddings
+
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.LongTensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
+ output_hidden_states = (
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
+ )
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ if input_ids is not None and inputs_embeds is not None:
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
+ elif input_ids is not None:
+ batch_size, seq_length = input_ids.shape
+ elif inputs_embeds is not None:
+ batch_size, seq_length, _ = inputs_embeds.shape
+ else:
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
+
+ if past_key_values is None:
+ past_key_values = tuple([None] * len(self.blocks))
+
+ if inputs_embeds is None:
+ inputs_embeds = self.wte(input_ids)
+
+ hidden_states = inputs_embeds
+
+ presents = () if use_cache else None
+ all_self_attentions = () if output_attentions else None
+ all_hidden_states = () if output_hidden_states else None
+
+ if self.gradient_checkpointing and self.training:
+ if use_cache:
+ logger.warning_once(
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
+ )
+ use_cache = False
+
+ # Compute alibi tensor: check build_alibi_tensor documentation
+ seq_length_with_past = seq_length
+ past_key_values_length = 0
+ if past_key_values[0] is not None:
+ past_key_values_length = past_key_values[0][0].shape[2]
+ seq_length_with_past = seq_length_with_past + past_key_values_length
+ if attention_mask is None:
+ attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
+ else:
+ attention_mask = attention_mask.to(hidden_states.device)
+
+ alibi = self.build_mpt_alibi_tensor(self.num_heads, self.config.max_seq_len, device=hidden_states.device)
+
+ causal_mask = _prepare_4d_causal_attention_mask(
+ attention_mask, (batch_size, seq_length), inputs_embeds, past_key_values_length
+ )
+ causal_mask = causal_mask.bool()
+
+ for block, layer_past in zip(self.blocks, past_key_values):
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if self.gradient_checkpointing and self.training:
+ outputs = self._gradient_checkpointing_func(
+ block.__call__,
+ hidden_states,
+ alibi,
+ causal_mask,
+ layer_past,
+ use_cache,
+ output_attentions,
+ )
+ else:
+ outputs = block(
+ hidden_states,
+ layer_past=layer_past,
+ attention_mask=causal_mask,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ position_bias=alibi,
+ )
+
+ hidden_states = outputs[0]
+ if use_cache is True:
+ presents = presents + (outputs[1],)
+
+ if output_attentions:
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
+
+ # Add last hidden state
+ hidden_states = self.norm_f(hidden_states)
+
+ if output_hidden_states:
+ all_hidden_states = all_hidden_states + (hidden_states,)
+
+ if not return_dict:
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
+
+ return BaseModelOutputWithPastAndCrossAttentions(
+ last_hidden_state=hidden_states,
+ past_key_values=presents,
+ hidden_states=all_hidden_states,
+ attentions=all_self_attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The MPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
+ embeddings).
+ """,
+ MPT_START_DOCSTRING,
+)
+class MptForCausalLM(MptPreTrainedModel):
+ _tied_weights_keys = ["lm_head.weight"]
+
+ def __init__(self, config: MptConfig):
+ super().__init__(config)
+ self.transformer = MptModel(config)
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ def get_output_embeddings(self):
+ return self.lm_head
+
+ def set_output_embeddings(self, new_embeddings: torch.Tensor):
+ self.lm_head = new_embeddings
+
+ def prepare_inputs_for_generation(
+ self,
+ input_ids: torch.LongTensor,
+ past_key_values: Optional[torch.Tensor] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ **kwargs,
+ ) -> dict:
+ # only last tokens for input_ids if past is not None
+ if past_key_values is not None:
+ past_length = past_key_values[0][0].shape[2]
+
+ # Some generation methods already pass only the last input ID
+ if input_ids.shape[1] > past_length:
+ remove_prefix_length = past_length
+ else:
+ # Default to old behavior: keep only final ID
+ remove_prefix_length = input_ids.shape[1] - 1
+
+ input_ids = input_ids[:, remove_prefix_length:]
+
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
+ if inputs_embeds is not None and past_key_values is None:
+ model_inputs = {"inputs_embeds": inputs_embeds}
+ else:
+ model_inputs = {"input_ids": input_ids}
+
+ model_inputs.update(
+ {
+ "past_key_values": past_key_values, # NITS should it be layer_past?
+ "use_cache": use_cache,
+ "attention_mask": attention_mask,
+ }
+ )
+ return model_inputs
+
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=CausalLMOutputWithCrossAttentions,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
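+
+ Example (a minimal sketch; assumes the `mosaicml/mpt-7b` checkpoint can be downloaded and fits in memory):
+
+ ```python
+ >>> from transformers import AutoTokenizer, MptForCausalLM
+
+ >>> tokenizer = AutoTokenizer.from_pretrained("mosaicml/mpt-7b")
+ >>> model = MptForCausalLM.from_pretrained("mosaicml/mpt-7b")
+ >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
+ >>> outputs = model(**inputs, labels=inputs["input_ids"])
+ >>> loss = outputs.loss  # language-modeling loss; labels are shifted inside the model
+ ```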
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+ hidden_states = transformer_outputs[0]
+
+ lm_logits = self.lm_head(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(lm_logits.device)
+ # Shift so that tokens < n predict n
+ shift_logits = lm_logits[..., :-1, :].contiguous()
+ shift_labels = labels[..., 1:].contiguous()
+ batch_size, seq_length, vocab_size = shift_logits.shape
+ # Flatten the tokens
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
+ )
+
+ if not return_dict:
+ output = (lm_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return CausalLMOutputWithCrossAttentions(
+ loss=loss,
+ logits=lm_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+ def _reorder_cache(
+ self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
+ """
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
+ beam_idx at every generation step.
+
+ Output shares the same memory storage as `past`.
+ """
+ # Get a copy of `beam_idx` on all the devices where we need those indices.
+ device_to_beam_idx = {
+ past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
+ }
+ reordered_past = tuple(
+ (
+ layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
+ layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
+ )
+ for layer_past in past
+ )
+ return reordered_past
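+ # Illustrative sketch (comments only): with two beams and beam_idx = tensor([1, 1]), every layer's cached key
+ # and value tensors are gathered along the batch dimension via index_select(0, ...), so both cache slots now
+ # hold beam 1's past states, matching the hypotheses kept by beam search at this step.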
+
+
+@add_start_docstrings(
+ """
+ The MPT Model transformer with a sequence classification head on top (linear layer).
+
+ [`MptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
+ (e.g. GPT-1) do.
+
+ Since it does classification on the last token, it needs to know the position of the last token. If a
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each
+ row. If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot
+ guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the
+ last value in each row of the batch).
+ """,
+ MPT_START_DOCSTRING,
+)
+class MptForSequenceClassification(MptPreTrainedModel):
+ def __init__(self, config: MptConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+ self.transformer = MptModel(config)
+ self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=SequenceClassifierOutputWithPast,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = transformer_outputs[0]
+ logits = self.score(hidden_states)
+
+ if input_ids is not None:
+ batch_size = input_ids.shape[0]
+ else:
+ batch_size = inputs_embeds.shape[0]
+
+ if self.config.pad_token_id is None and batch_size != 1:
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
+ if self.config.pad_token_id is None:
+ sequence_lengths = -1
+ else:
+ if input_ids is not None:
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
+ sequence_lengths = sequence_lengths.to(logits.device)
+ else:
+ sequence_lengths = -1
+ logger.warning(
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
+ )
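+ # Worked example (comments only, hypothetical values): with pad_token_id = 0 and an input_ids row
+ # [5, 6, 0, 0], torch.eq(...).argmax(-1) returns 2 (the first pad position) and subtracting 1 gives 1, the
+ # last real token. If a row contains no pad token, argmax returns 0, subtracting 1 gives -1, and the modulo
+ # by input_ids.shape[-1] wraps it to seq_length - 1, the final position.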
+
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
+
+ loss = None
+ if labels is not None:
+ if self.config.problem_type is None:
+ if self.num_labels == 1:
+ self.config.problem_type = "regression"
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
+ self.config.problem_type = "single_label_classification"
+ else:
+ self.config.problem_type = "multi_label_classification"
+
+ if self.config.problem_type == "regression":
+ loss_fct = MSELoss()
+ if self.num_labels == 1:
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
+ else:
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "single_label_classification":
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(pooled_logits, labels)
+ elif self.config.problem_type == "multi_label_classification":
+ loss_fct = BCEWithLogitsLoss()
+ loss = loss_fct(pooled_logits, labels)
+ if not return_dict:
+ output = (pooled_logits,) + transformer_outputs[1:]
+ return ((loss,) + output) if loss is not None else output
+
+ return SequenceClassifierOutputWithPast(
+ loss=loss,
+ logits=pooled_logits,
+ past_key_values=transformer_outputs.past_key_values,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ MPT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
+ Named-Entity-Recognition (NER) tasks.
+ """,
+ MPT_START_DOCSTRING,
+)
+class MptForTokenClassification(MptPreTrainedModel):
+ def __init__(self, config: MptConfig):
+ super().__init__(config)
+ self.num_labels = config.num_labels
+
+ self.transformer = MptModel(config)
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
+ classifier_dropout = config.classifier_dropout
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
+ classifier_dropout = config.hidden_dropout
+ else:
+ classifier_dropout = 0.1
+ self.dropout = nn.Dropout(classifier_dropout)
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING)
+ @add_code_sample_docstrings(
+ checkpoint=_CHECKPOINT_FOR_DOC,
+ output_type=TokenClassifierOutput,
+ config_class=_CONFIG_FOR_DOC,
+ )
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
+ attention_mask: Optional[torch.Tensor] = None,
+ inputs_embeds: Optional[torch.Tensor] = None,
+ labels: Optional[torch.Tensor] = None,
+ use_cache: Optional[bool] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ **deprecated_arguments,
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
+ r"""
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ transformer_outputs = self.transformer(
+ input_ids,
+ past_key_values=past_key_values,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ use_cache=use_cache,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ hidden_states = transformer_outputs[0]
+ hidden_states = self.dropout(hidden_states)
+ logits = self.classifier(hidden_states)
+
+ loss = None
+ if labels is not None:
+ # move labels to correct device to enable model parallelism
+ labels = labels.to(logits.device)
+ batch_size, seq_length = labels.shape
+ loss_fct = CrossEntropyLoss()
+ loss = loss_fct(
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
+ )
+
+ if not return_dict:
+ output = (logits,) + transformer_outputs[2:]
+ return ((loss,) + output) if loss is not None else output
+
+ return TokenClassifierOutput(
+ loss=loss,
+ logits=logits,
+ hidden_states=transformer_outputs.hidden_states,
+ attentions=transformer_outputs.attentions,
+ )
+
+
+@add_start_docstrings(
+ """
+ The MPT Model transformer with a span classification head on top for extractive question-answering tasks like SQuAD
+ (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
+ """,
+ MPT_START_DOCSTRING,
+)
+class MptForQuestionAnswering(MptPreTrainedModel):
+ def __init__(self, config):
+ super().__init__(config)
+ self.transformer = MptModel(config)
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(MPT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
+ def forward(
+ self,
+ input_ids: Optional[torch.LongTensor] = None,
+ attention_mask: Optional[torch.FloatTensor] = None,
+ inputs_embeds: Optional[torch.FloatTensor] = None,
+ start_positions: Optional[torch.LongTensor] = None,
+ end_positions: Optional[torch.LongTensor] = None,
+ output_attentions: Optional[bool] = None,
+ output_hidden_states: Optional[bool] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
+ r"""
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
+ are not taken into account for computing the loss.
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ outputs = self.transformer(
+ input_ids,
+ attention_mask=attention_mask,
+ inputs_embeds=inputs_embeds,
+ output_attentions=output_attentions,
+ output_hidden_states=output_hidden_states,
+ return_dict=return_dict,
+ )
+
+ sequence_output = outputs[0]
+
+ logits = self.qa_outputs(sequence_output)
+ start_logits, end_logits = logits.split(1, dim=-1)
+ start_logits = start_logits.squeeze(-1).contiguous()
+ end_logits = end_logits.squeeze(-1).contiguous()
+
+ total_loss = None
+ if start_positions is not None and end_positions is not None:
+ # If we are on multi-GPU, splitting may add a dimension, so squeeze it
+ if len(start_positions.size()) > 1:
+ start_positions = start_positions.squeeze(-1)
+ if len(end_positions.size()) > 1:
+ end_positions = end_positions.squeeze(-1)
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
+ ignored_index = start_logits.size(1)
+ start_positions = start_positions.clamp(0, ignored_index)
+ end_positions = end_positions.clamp(0, ignored_index)
+
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
+ start_loss = loss_fct(start_logits, start_positions)
+ end_loss = loss_fct(end_logits, end_positions)
+ total_loss = (start_loss + end_loss) / 2
+
+ if not return_dict:
+ output = (start_logits, end_logits) + outputs[2:]
+ return ((total_loss,) + output) if total_loss is not None else output
+
+ return QuestionAnsweringModelOutput(
+ loss=total_loss,
+ start_logits=start_logits,
+ end_logits=end_logits,
+ hidden_states=outputs.hidden_states,
+ attentions=outputs.attentions,
+ )
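+ # Post-processing sketch (comments only, not part of the model): a predicted span is commonly recovered from
+ # these outputs with start_logits.argmax(-1) and end_logits.argmax(-1), and the answer text is then decoded
+ # from the tokens between the two positions.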
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38b10ca7de6542639bbc31519f1f776e196fc6d2
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/tokenization_tapas.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/tokenization_tapas.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1abf5c3e178ff949d1f58d83a9fe3f5dedb869f3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/tapas/__pycache__/tokenization_tapas.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/univnet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..afb03ee9894b0ebe58c9ec864e3eccb32719b993
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/univnet/__init__.py
@@ -0,0 +1,65 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import TYPE_CHECKING
+
+from ...utils import (
+ OptionalDependencyNotAvailable,
+ _LazyModule,
+ is_torch_available,
+)
+
+
+_import_structure = {
+ "configuration_univnet": [
+ "UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
+ "UnivNetConfig",
+ ],
+ "feature_extraction_univnet": ["UnivNetFeatureExtractor"],
+}
+
+try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+except OptionalDependencyNotAvailable:
+ pass
+else:
+ _import_structure["modeling_univnet"] = [
+ "UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST",
+ "UnivNetModel",
+ ]
+
+
+if TYPE_CHECKING:
+ from .configuration_univnet import (
+ UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
+ UnivNetConfig,
+ )
+ from .feature_extraction_univnet import UnivNetFeatureExtractor
+
+ try:
+ if not is_torch_available():
+ raise OptionalDependencyNotAvailable()
+ except OptionalDependencyNotAvailable:
+ pass
+ else:
+ from .modeling_univnet import (
+ UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST,
+ UnivNetModel,
+ )
+
+else:
+ import sys
+
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..93f3506c8818b46505b9bc41bd8ab04e2a367263
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/feature_extraction_univnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/modeling_univnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/modeling_univnet.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d809b9050e66b6729e06b7e7b94c22a2a7e5180b
Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/univnet/__pycache__/modeling_univnet.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/configuration_univnet.py b/venv/lib/python3.10/site-packages/transformers/models/univnet/configuration_univnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..933db21d5ae3814441b1d4fb324fc601df329d74
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/univnet/configuration_univnet.py
@@ -0,0 +1,125 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" UnivNetModel model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import UNIVNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
+
+
+class UnivNetConfig(PretrainedConfig):
+ r"""
+ This is the configuration class to store the configuration of a [`UnivNetModel`]. It is used to instantiate a
+ UnivNet vocoder model according to the specified arguments, defining the model architecture. Instantiating a
+ configuration with the defaults will yield a similar configuration to that of the UnivNet
+ [dg845/univnet-dev](https://huggingface.co/dg845/univnet-dev) architecture, which corresponds to the 'c32'
+ architecture in [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/master/config/default_c32.yaml).
+
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+ documentation from [`PretrainedConfig`] for more information.
+
+ Args:
+ model_in_channels (`int`, *optional*, defaults to 64):
+ The number of input channels for the UnivNet residual network. This should correspond to
+ `noise_sequence.shape[1]` and the value used in the [`UnivNetFeatureExtractor`] class.
+ model_hidden_channels (`int`, *optional*, defaults to 32):
+ The number of hidden channels of each residual block in the UnivNet residual network.
+ num_mel_bins (`int`, *optional*, defaults to 100):
+ The number of frequency bins in the conditioning log-mel spectrogram. This should correspond to the value
+ used in the [`UnivNetFeatureExtractor`] class.
+ resblock_kernel_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[3, 3, 3]`):
+ A tuple of integers defining the kernel sizes of the 1D convolutional layers in the UnivNet residual
+ network. The length of `resblock_kernel_sizes` defines the number of resnet blocks and should match that of
+ `resblock_stride_sizes` and `resblock_dilation_sizes`.
+ resblock_stride_sizes (`Tuple[int]` or `List[int]`, *optional*, defaults to `[8, 8, 4]`):
+ A tuple of integers defining the stride sizes of the 1D convolutional layers in the UnivNet residual
+ network. The length of `resblock_stride_sizes` should match that of `resblock_kernel_sizes` and
+ `resblock_dilation_sizes`.
+ resblock_dilation_sizes (`Tuple[Tuple[int]]` or `List[List[int]]`, *optional*, defaults to `[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]]`):
+ A nested tuple of integers defining the dilation rates of the dilated 1D convolutional layers in the
+ UnivNet residual network. The length of `resblock_dilation_sizes` should match that of
+ `resblock_kernel_sizes` and `resblock_stride_sizes`. The length of each nested list in
+ `resblock_dilation_sizes` defines the number of convolutional layers per resnet block.
+ kernel_predictor_num_blocks (`int`, *optional*, defaults to 3):
+ The number of residual blocks in the kernel predictor network, which calculates the kernel and bias for
+ each location variable convolution layer in the UnivNet residual network.
+ kernel_predictor_hidden_channels (`int`, *optional*, defaults to 64):
+ The number of hidden channels for each residual block in the kernel predictor network.
+ kernel_predictor_conv_size (`int`, *optional*, defaults to 3):
+ The kernel size of each 1D convolutional layer in the kernel predictor network.
+ kernel_predictor_dropout (`float`, *optional*, defaults to 0.0):
+ The dropout probability for each residual block in the kernel predictor network.
+ initializer_range (`float`, *optional*, defaults to 0.01):
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ leaky_relu_slope (`float`, *optional*, defaults to 0.2):
+ The angle of the negative slope used by the leaky ReLU activation.
+
+ Example:
+
+ ```python
+ >>> from transformers import UnivNetModel, UnivNetConfig
+
+ >>> # Initializing a Tortoise TTS style configuration
+ >>> configuration = UnivNetConfig()
+
+ >>> # Initializing a model (with random weights) from the Tortoise TTS style configuration
+ >>> model = UnivNetModel(configuration)
+
+ >>> # Accessing the model configuration
+ >>> configuration = model.config
+ ```
+ """
+
+ model_type = "univnet"
+
+ def __init__(
+ self,
+ model_in_channels=64,
+ model_hidden_channels=32,
+ num_mel_bins=100,
+ resblock_kernel_sizes=[3, 3, 3],
+ resblock_stride_sizes=[8, 8, 4],
+ resblock_dilation_sizes=[[1, 3, 9, 27], [1, 3, 9, 27], [1, 3, 9, 27]],
+ kernel_predictor_num_blocks=3,
+ kernel_predictor_hidden_channels=64,
+ kernel_predictor_conv_size=3,
+ kernel_predictor_dropout=0.0,
+ initializer_range=0.01,
+ leaky_relu_slope=0.2,
+ **kwargs,
+ ):
+ if not (len(resblock_kernel_sizes) == len(resblock_stride_sizes) == len(resblock_dilation_sizes)):
+ raise ValueError(
+ "`resblock_kernel_sizes`, `resblock_stride_sizes`, and `resblock_dilation_sizes` must all have the"
+ " same length (which will be the number of resnet blocks in the model)."
+ )
+
+ self.model_in_channels = model_in_channels
+ self.model_hidden_channels = model_hidden_channels
+ self.num_mel_bins = num_mel_bins
+ self.resblock_kernel_sizes = resblock_kernel_sizes
+ self.resblock_stride_sizes = resblock_stride_sizes
+ self.resblock_dilation_sizes = resblock_dilation_sizes
+ self.kernel_predictor_num_blocks = kernel_predictor_num_blocks
+ self.kernel_predictor_hidden_channels = kernel_predictor_hidden_channels
+ self.kernel_predictor_conv_size = kernel_predictor_conv_size
+ self.kernel_predictor_dropout = kernel_predictor_dropout
+ self.initializer_range = initializer_range
+ self.leaky_relu_slope = leaky_relu_slope
+ super().__init__(**kwargs)
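+ # Validation sketch (comments only, hypothetical values): UnivNetConfig(resblock_kernel_sizes=[3, 3]) raises
+ # the ValueError above, because the two-element kernel list no longer matches the default three-element
+ # `resblock_stride_sizes` and `resblock_dilation_sizes`.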
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/convert_univnet.py b/venv/lib/python3.10/site-packages/transformers/models/univnet/convert_univnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..30520b7fa14725b0bdaf9e0c7a4aed92ad8ea318
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/univnet/convert_univnet.py
@@ -0,0 +1,162 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+import torch
+
+from transformers import UnivNetConfig, UnivNetModel, logging
+
+
+logging.set_verbosity_info()
+logger = logging.get_logger("transformers.models.univnet")
+
+
+def get_kernel_predictor_key_mapping(config: UnivNetConfig, old_prefix: str = "", new_prefix: str = ""):
+ mapping = {}
+ # Initial conv layer
+ mapping[f"{old_prefix}.input_conv.0.weight_g"] = f"{new_prefix}.input_conv.weight_g"
+ mapping[f"{old_prefix}.input_conv.0.weight_v"] = f"{new_prefix}.input_conv.weight_v"
+ mapping[f"{old_prefix}.input_conv.0.bias"] = f"{new_prefix}.input_conv.bias"
+
+ # Kernel predictor resnet blocks
+ for i in range(config.kernel_predictor_num_blocks):
+ mapping[f"{old_prefix}.residual_convs.{i}.1.weight_g"] = f"{new_prefix}.resblocks.{i}.conv1.weight_g"
+ mapping[f"{old_prefix}.residual_convs.{i}.1.weight_v"] = f"{new_prefix}.resblocks.{i}.conv1.weight_v"
+ mapping[f"{old_prefix}.residual_convs.{i}.1.bias"] = f"{new_prefix}.resblocks.{i}.conv1.bias"
+
+ mapping[f"{old_prefix}.residual_convs.{i}.3.weight_g"] = f"{new_prefix}.resblocks.{i}.conv2.weight_g"
+ mapping[f"{old_prefix}.residual_convs.{i}.3.weight_v"] = f"{new_prefix}.resblocks.{i}.conv2.weight_v"
+ mapping[f"{old_prefix}.residual_convs.{i}.3.bias"] = f"{new_prefix}.resblocks.{i}.conv2.bias"
+
+ # Kernel output conv
+ mapping[f"{old_prefix}.kernel_conv.weight_g"] = f"{new_prefix}.kernel_conv.weight_g"
+ mapping[f"{old_prefix}.kernel_conv.weight_v"] = f"{new_prefix}.kernel_conv.weight_v"
+ mapping[f"{old_prefix}.kernel_conv.bias"] = f"{new_prefix}.kernel_conv.bias"
+
+ # Bias output conv
+ mapping[f"{old_prefix}.bias_conv.weight_g"] = f"{new_prefix}.bias_conv.weight_g"
+ mapping[f"{old_prefix}.bias_conv.weight_v"] = f"{new_prefix}.bias_conv.weight_v"
+ mapping[f"{old_prefix}.bias_conv.bias"] = f"{new_prefix}.bias_conv.bias"
+
+ return mapping
+
+
+def get_key_mapping(config: UnivNetConfig):
+ mapping = {}
+
+ # NOTE: initial conv layer keys are the same
+
+ # LVC Residual blocks
+ for i in range(len(config.resblock_stride_sizes)):
+ # LVCBlock initial convt layer
+ mapping[f"res_stack.{i}.convt_pre.1.weight_g"] = f"resblocks.{i}.convt_pre.weight_g"
+ mapping[f"res_stack.{i}.convt_pre.1.weight_v"] = f"resblocks.{i}.convt_pre.weight_v"
+ mapping[f"res_stack.{i}.convt_pre.1.bias"] = f"resblocks.{i}.convt_pre.bias"
+
+ # Kernel predictor
+ kernel_predictor_mapping = get_kernel_predictor_key_mapping(
+ config, old_prefix=f"res_stack.{i}.kernel_predictor", new_prefix=f"resblocks.{i}.kernel_predictor"
+ )
+ mapping.update(kernel_predictor_mapping)
+
+ # LVC Residual blocks
+ for j in range(len(config.resblock_dilation_sizes[i])):
+ mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_g"] = f"resblocks.{i}.resblocks.{j}.conv.weight_g"
+ mapping[f"res_stack.{i}.conv_blocks.{j}.1.weight_v"] = f"resblocks.{i}.resblocks.{j}.conv.weight_v"
+ mapping[f"res_stack.{i}.conv_blocks.{j}.1.bias"] = f"resblocks.{i}.resblocks.{j}.conv.bias"
+
+ # Output conv layer
+ mapping["conv_post.1.weight_g"] = "conv_post.weight_g"
+ mapping["conv_post.1.weight_v"] = "conv_post.weight_v"
+ mapping["conv_post.1.bias"] = "conv_post.bias"
+
+ return mapping
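+ # Example of the resulting mapping for the first LVC block (names taken from the rules above):
+ #   "res_stack.0.convt_pre.1.weight_g"  -> "resblocks.0.convt_pre.weight_g"
+ #   "res_stack.0.conv_blocks.0.1.bias"  -> "resblocks.0.resblocks.0.conv.bias"
+ #   "conv_post.1.weight_v"              -> "conv_post.weight_v"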
+
+
+def rename_state_dict(state_dict, keys_to_modify, keys_to_remove):
+ model_state_dict = {}
+ for key, value in state_dict.items():
+ if key in keys_to_remove:
+ continue
+
+ if key in keys_to_modify:
+ new_key = keys_to_modify[key]
+ model_state_dict[new_key] = value
+ else:
+ model_state_dict[key] = value
+ return model_state_dict
+
+
+def convert_univnet_checkpoint(
+ checkpoint_path,
+ pytorch_dump_folder_path,
+ config_path=None,
+ repo_id=None,
+ safe_serialization=False,
+):
+ model_state_dict_base = torch.load(checkpoint_path, map_location="cpu")
+ # Get the generator's state dict
+ state_dict = model_state_dict_base["model_g"]
+
+ if config_path is not None:
+ config = UnivNetConfig.from_pretrained(config_path)
+ else:
+ config = UnivNetConfig()
+
+ keys_to_modify = get_key_mapping(config)
+ keys_to_remove = set()
+ hf_state_dict = rename_state_dict(state_dict, keys_to_modify, keys_to_remove)
+
+ model = UnivNetModel(config)
+ # Apply weight norm since the original checkpoint has weight norm applied
+ model.apply_weight_norm()
+ model.load_state_dict(hf_state_dict)
+ # Remove weight norm in preparation for inference
+ model.remove_weight_norm()
+
+ model.save_pretrained(pytorch_dump_folder_path, safe_serialization=safe_serialization)
+
+ if repo_id:
+ print("Pushing to the hub...")
+ model.push_to_hub(repo_id)
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
+ parser.add_argument(
+ "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
+ )
+ parser.add_argument(
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ )
+ parser.add_argument(
+ "--safe_serialization", action="store_true", help="Whether to save the model using `safetensors`."
+ )
+
+ args = parser.parse_args()
+
+ convert_univnet_checkpoint(
+ args.checkpoint_path,
+ args.pytorch_dump_folder_path,
+ args.config_path,
+ args.push_to_hub,
+ args.safe_serialization,
+ )
+
+
+if __name__ == "__main__":
+ main()
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/feature_extraction_univnet.py b/venv/lib/python3.10/site-packages/transformers/models/univnet/feature_extraction_univnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..067aacc3d8c8ca51336680ee7afe8a9fec677fd7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/univnet/feature_extraction_univnet.py
@@ -0,0 +1,456 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Feature extractor class for UnivNetModel."""
+
+from typing import Any, Dict, List, Optional, Union
+
+import numpy as np
+
+from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
+from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
+from ...feature_extraction_utils import BatchFeature
+from ...utils import PaddingStrategy, TensorType, logging
+
+
+logger = logging.get_logger(__name__)
+
+
+class UnivNetFeatureExtractor(SequenceFeatureExtractor):
+ r"""
+ Constructs a UnivNet feature extractor.
+
+ This class extracts log-mel-filter bank features from raw speech using the short time Fourier Transform (STFT). The
+ STFT implementation follows that of Tacotron 2 and Hifi-GAN.
+
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
+
+ Args:
+ feature_size (`int`, *optional*, defaults to 1):
+ The feature dimension of the extracted features.
+ sampling_rate (`int`, *optional*, defaults to 24000):
+ The sampling rate at which the audio files should be digitized, expressed in hertz (Hz).
+ padding_value (`float`, *optional*, defaults to 0.0):
+ The value to pad with when applying the padding strategy defined by the `padding` argument to
+ [`UnivNetFeatureExtractor.__call__`]. Should correspond to audio silence. The `pad_end` argument to
+ `__call__` will also use this padding value.
+ do_normalize (`bool`, *optional*, defaults to `False`):
+ Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve the
+ performance for some models.
+ num_mel_bins (`int`, *optional*, defaults to 100):
+ The number of mel-frequency bins in the extracted spectrogram features. This should match
+ `UnivNetModel.config.num_mel_bins`.
+ hop_length (`int`, *optional*, defaults to 256):
+ The direct number of samples between sliding windows. Otherwise referred to as "shift" in many papers. Note
+ that this is different from other audio feature extractors such as [`SpeechT5FeatureExtractor`] which take
+ the `hop_length` in ms.
+ win_length (`int`, *optional*, defaults to 1024):
+ The direct number of samples for each sliding window. Note that this is different from other audio feature
+ extractors such as [`SpeechT5FeatureExtractor`] which take the `win_length` in ms.
+ win_function (`str`, *optional*, defaults to `"hann_window"`):
+ Name for the window function used for windowing, must be accessible via `torch.{win_function}`
+ filter_length (`int`, *optional*, defaults to 1024):
+ The number of FFT components to use. If `None`, this is determined using
+ `transformers.audio_utils.optimal_fft_length`.
+ max_length_s (`int`, *optional*, defaults to 10):
+ The maximum input length of the model in seconds. This is used to pad the audio.
+ fmin (`float`, *optional*, defaults to 0.0):
+ Minimum mel frequency in Hz.
+ fmax (`float`, *optional*):
+ Maximum mel frequency in Hz. If not set, defaults to `sampling_rate / 2`.
+ mel_floor (`float`, *optional*, defaults to 1e-09):
+ Minimum value of mel frequency banks. Note that the way [`UnivNetFeatureExtractor`] uses `mel_floor` is
+ different than in [`transformers.audio_utils.spectrogram`].
+ center (`bool`, *optional*, defaults to `False`):
+ Whether to pad the waveform so that frame `t` is centered around time `t * hop_length`. If `False`, frame
+ `t` will start at time `t * hop_length`.
+ compression_factor (`float`, *optional*, defaults to 1.0):
+ The multiplicative compression factor for dynamic range compression during spectral normalization.
+ compression_clip_val (`float`, *optional*, defaults to 1e-05):
+ The clip value applied to the waveform before applying dynamic range compression during spectral
+ normalization.
+ normalize_min (`float`, *optional*, defaults to -11.512925148010254):
+ The min value used for Tacotron 2-style linear normalization. The default is the original value from the
+ Tacotron 2 implementation.
+ normalize_max (`float`, *optional*, defaults to 2.3143386840820312):
+ The max value used for Tacotron 2-style linear normalization. The default is the original value from the
+ Tacotron 2 implementation.
+ model_in_channels (`int`, *optional*, defaults to 64):
+ The number of input channels to the [`UnivNetModel`] model. This should match
+ `UnivNetModel.config.model_in_channels`.
+ pad_end_length (`int`, *optional*, defaults to 10):
+ If padding the end of each waveform, the number of spectrogram frames worth of samples to append. The
+ number of appended samples will be `pad_end_length * hop_length`.
+ return_attention_mask (`bool`, *optional*, defaults to `True`):
+ Whether or not [`~UnivNetFeatureExtractor.__call__`] should return `attention_mask`.
+ """
+
+ model_input_names = ["input_features", "noise_sequence", "padding_mask"]
+
+ def __init__(
+ self,
+ feature_size: int = 1,
+ sampling_rate: int = 24000,
+ padding_value: float = 0.0,
+ do_normalize: bool = False,
+ num_mel_bins: int = 100,
+ hop_length: int = 256,
+ win_length: int = 1024,
+ win_function: str = "hann_window",
+ filter_length: Optional[int] = 1024,
+ max_length_s: int = 10,
+ fmin: float = 0.0,
+ fmax: Optional[float] = None,
+ mel_floor: float = 1e-9,
+ center: bool = False,
+ compression_factor: float = 1.0,
+ compression_clip_val: float = 1e-5,
+ normalize_min: float = -11.512925148010254,
+ normalize_max: float = 2.3143386840820312,
+ model_in_channels: int = 64,
+ pad_end_length: int = 10,
+ return_attention_mask=True,
+ **kwargs,
+ ):
+ super().__init__(
+ feature_size=feature_size,
+ sampling_rate=sampling_rate,
+ padding_value=padding_value,
+ return_attention_mask=return_attention_mask,
+ **kwargs,
+ )
+
+ self.do_normalize = do_normalize
+
+ self.num_mel_bins = num_mel_bins
+ self.hop_length = hop_length
+ self.win_length = win_length
+ self.win_function = win_function
+ self.filter_length = filter_length
+ self.fmin = fmin
+ if fmax is None:
+ # Follows the librosa.filters.mel implementation
+ fmax = float(sampling_rate) / 2
+ self.fmax = fmax
+ self.mel_floor = mel_floor
+
+ self.max_length_s = max_length_s
+ self.num_max_samples = max_length_s * sampling_rate
+
+ if self.filter_length is None:
+ self.n_fft = optimal_fft_length(self.win_length)
+ else:
+ self.n_fft = self.filter_length
+ self.n_freqs = (self.n_fft // 2) + 1
+
+ self.window = window_function(window_length=self.win_length, name=self.win_function, periodic=True)
+
+ self.mel_filters = mel_filter_bank(
+ num_frequency_bins=self.n_freqs,
+ num_mel_filters=self.num_mel_bins,
+ min_frequency=self.fmin,
+ max_frequency=self.fmax,
+ sampling_rate=self.sampling_rate,
+ norm="slaney",
+ mel_scale="slaney",
+ )
+
+ self.center = center
+ self.compression_factor = compression_factor
+ self.compression_clip_val = compression_clip_val
+ self.normalize_min = normalize_min
+ self.normalize_max = normalize_max
+ self.model_in_channels = model_in_channels
+ self.pad_end_length = pad_end_length
+
+ def normalize(self, spectrogram):
+ return 2 * ((spectrogram - self.normalize_min) / (self.normalize_max - self.normalize_min)) - 1
+
+ def denormalize(self, spectrogram):
+ return self.normalize_min + (self.normalize_max - self.normalize_min) * ((spectrogram + 1) / 2)
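+ # Worked example (comments only): normalize() linearly maps normalize_min to -1 and normalize_max to +1, and
+ # denormalize() is its exact inverse, so denormalize(normalize(x)) == x for any x.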
+
+ def mel_spectrogram(self, waveform: np.ndarray) -> np.ndarray:
+ """
+ Calculates log MEL spectrograms from a batch of waveforms. Note that the input waveform(s) will be padded by
+ `int(self.n_fft - self.hop_length) / 2` on both sides using the `reflect` padding mode.
+
+ Args:
+ waveform (`np.ndarray` of shape `(length,)`):
+ The input waveform. This must be a single real-valued, mono waveform.
+
+ Returns:
+ `numpy.ndarray`: Array containing a log-mel spectrogram of shape `(num_frames, num_mel_bins)`.
+ """
+ # Do custom padding based on the official MelGAN and Hifi-GAN implementations
+ # See https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/utils/stft.py#L84-L86
+ waveform = np.pad(
+ waveform,
+ (int((self.n_fft - self.hop_length) / 2), int((self.n_fft - self.hop_length) / 2)),
+ mode="reflect",
+ )
+
+ # Get the complex spectrogram.
+ # Note: waveform must be unbatched currently due to the implementation of spectrogram(...).
+ complex_spectrogram = spectrogram(
+ waveform,
+ window=self.window,
+ frame_length=self.n_fft,
+ hop_length=self.hop_length,
+ fft_length=self.n_fft,
+ power=None,
+ center=self.center,
+ mel_filters=None,
+ mel_floor=None,
+ )
+
+ # Apply the MEL filter bank and MEL floor manually since UnivNet uses a slightly different implementation
+ amplitude_spectrogram = np.sqrt(
+ np.real(complex_spectrogram) ** 2 + np.imag(complex_spectrogram) ** 2 + self.mel_floor
+ )
+ mel_spectrogram = np.matmul(self.mel_filters.T, amplitude_spectrogram)
+
+ # Perform spectral normalization to get the log mel spectrogram.
+ log_mel_spectrogram = np.log(
+ np.clip(mel_spectrogram, a_min=self.compression_clip_val, a_max=None) * self.compression_factor
+ )
+
+ # Return spectrogram with num_mel_bins last
+ return log_mel_spectrogram.T
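+ # Shape sketch (comments only): because the waveform is reflect-padded by (n_fft - hop_length) / 2 on each
+ # side and center=False, the returned spectrogram has roughly len(waveform) // hop_length frames, each with
+ # num_mel_bins features.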
+
+ def generate_noise(
+ self,
+ noise_length: int,
+ generator: Optional[np.random.Generator] = None,
+ ) -> np.ndarray:
+ """
+ Generates a random noise sequence of standard Gaussian noise for use in the `noise_sequence` argument of
+ [`UnivNetModel.forward`].
+
+ Args:
+ noise_length (`int`):
+ The length (dim 0) of the generated noise. The number of features (dim 1) of the generated noise is
+ taken from `self.model_in_channels` and should correspond to `UnivNetModel.config.model_in_channels`.
+ generator (`numpy.random.Generator`, *optional*, defaults to `None`):
+ An optional `numpy.random.Generator` random number generator to control noise generation. If not set, a
+ new generator with fresh entropy will be created.
+
+ Returns:
+ `numpy.ndarray`: Array containing random standard Gaussian noise of shape `(noise_length,
+ model_in_channels)`.
+ """
+ if generator is None:
+ generator = np.random.default_rng()
+
+ noise_shape = (noise_length, self.model_in_channels)
+ noise = generator.standard_normal(noise_shape, dtype=np.float32)
+
+ return noise
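+ # Usage sketch (comments only, hypothetical values): generate_noise(100) returns a float32 array of shape
+ # (100, self.model_in_channels), i.e. one standard-normal noise vector per spectrogram frame, which is what
+ # `UnivNetModel.forward` expects as `noise_sequence`.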
+
+ def batch_decode(self, waveforms, waveform_lengths=None) -> List[np.ndarray]:
+ r"""
+ Removes padding from generated audio after running [`UnivNetModel.forward`]. This returns a ragged list of 1D
+ audio waveform arrays and not a single tensor/array because in general the waveforms will have different
+ lengths after removing padding.
+
+ Args:
+ waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ The batched output waveforms from the [`UnivNetModel`].
+ waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`, *optional*):
+ The batched lengths of each waveform before padding.
+
+ Returns:
+ `List[np.ndarray]`: A ragged list of 1D waveform arrays with padding removed.
+ """
+ # Collapse the batched waveform tensor to a list of 1D audio waveforms
+ waveforms = [waveform.detach().clone().cpu().numpy() for waveform in waveforms]
+
+ if waveform_lengths is not None:
+ waveforms = [waveform[: waveform_lengths[i]] for i, waveform in enumerate(waveforms)]
+
+ return waveforms
+
+ def __call__(
+ self,
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
+ sampling_rate: Optional[int] = None,
+ padding: Union[bool, str, PaddingStrategy] = True,
+ max_length: Optional[int] = None,
+ truncation: bool = True,
+ pad_to_multiple_of: Optional[int] = None,
+ return_noise: bool = True,
+ generator: Optional[np.random.Generator] = None,
+ pad_end: bool = False,
+ pad_length: Optional[int] = None,
+ do_normalize: Optional[bool] = None,
+ return_attention_mask: Optional[bool] = None,
+ return_tensors: Optional[Union[str, TensorType]] = None,
+ ) -> BatchFeature:
+ """
+ Main method to featurize and prepare for the model one or several sequence(s).
+
+ Args:
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
+ stereo, i.e. single float per timestep.
+ sampling_rate (`int`, *optional*):
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
+ `sampling_rate` at the forward call to prevent silent errors and allow automatic speech recognition
+ pipeline.
+ padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):
+ Select a strategy to pad the input `raw_speech` waveforms (according to the model's padding side and
+ padding index) among:
+
+ - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
+ sequence is provided).
+ - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
+ acceptable input length for the model if that argument is not provided.
+ - `False` or `'do_not_pad'`: No padding (i.e., can output a batch with sequences of different
+ lengths).
+
+ If `pad_end = True`, that padding will occur before the `padding` strategy is applied.
+ max_length (`int`, *optional*):
+ Maximum length of the returned list and optionally padding length (see above).
+ truncation (`bool`, *optional*, defaults to `True`):
+ Activates truncation to cut input sequences longer than `max_length` to `max_length`.
+ pad_to_multiple_of (`int`, *optional*):
+ If set will pad the sequence to a multiple of the provided value.
+
+ This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability
+ `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.
+ return_noise (`bool`, *optional*, defaults to `True`):
+ Whether to generate and return a noise waveform for use in [`UnivNetModel.forward`].
+ generator (`numpy.random.Generator`, *optional*, defaults to `None`):
+ An optional `numpy.random.Generator` random number generator to use when generating noise.
+ pad_end (`bool`, *optional*, defaults to `False`):
+ Whether to pad the end of each waveform with silence. This can help reduce artifacts at the end of the
+ generated audio sample; see https://github.com/seungwonpark/melgan/issues/8 for more details. This
+ padding will be done before the padding strategy specified in `padding` is performed.
+ pad_length (`int`, *optional*, defaults to `None`):
+ If padding the end of each waveform, the length of the padding in spectrogram frames. If not set, this
+ will default to `self.config.pad_end_length`.
+ do_normalize (`bool`, *optional*):
+ Whether to perform Tacotron 2 normalization on the input. Normalizing can help to significantly improve
+ the performance for some models. If not set, this will default to `self.config.do_normalize`.
+ return_attention_mask (`bool`, *optional*):
+ Whether to return the attention mask. If left to the default, will return the attention mask according
+ to the specific feature_extractor's default.
+
+ [What are attention masks?](../glossary#attention-mask)
+
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
+ If set, will return tensors instead of lists of Python numbers. Acceptable values are:
+
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
+ - `'np'`: Return Numpy `np.ndarray` objects.
+ """
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
+
+ if sampling_rate is not None:
+ if sampling_rate != self.sampling_rate:
+ raise ValueError(
+ f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
+ f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
+ f" was sampled with {self.sampling_rate} and not {sampling_rate}."
+ )
+ else:
+ logger.warning(
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
+ "Failing to do so can result in silent errors that might be hard to debug."
+ )
+
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
+ if is_batched_numpy and len(raw_speech.shape) > 2:
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
+ is_batched = is_batched_numpy or (
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
+ )
+
+ if is_batched:
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
+ raw_speech = raw_speech.astype(np.float32)
+
+ # always return batch
+ if not is_batched:
+ raw_speech = [np.asarray(raw_speech, dtype=np.float32)]
+
+ # Pad end to reduce artifacts
+ if pad_end:
+ pad_length = pad_length if pad_length is not None else self.pad_end_length
+ raw_speech = [
+ np.pad(waveform, (0, pad_length * self.hop_length), constant_values=self.padding_value)
+ for waveform in raw_speech
+ ]
+
+ batched_speech = BatchFeature({"input_features": raw_speech})
+
+ padded_inputs = self.pad(
+ batched_speech,
+ padding=padding,
+ max_length=max_length if max_length is not None else self.num_max_samples,
+ truncation=truncation,
+ pad_to_multiple_of=pad_to_multiple_of,
+ return_attention_mask=return_attention_mask,
+ )
+
+ # make sure list is in array format
+ # input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
+ input_features = padded_inputs.get("input_features")
+
+ mel_spectrograms = [self.mel_spectrogram(waveform) for waveform in input_features]
+
+ if isinstance(input_features[0], List):
+ batched_speech["input_features"] = [np.asarray(mel, dtype=np.float32) for mel in mel_spectrograms]
+ else:
+ batched_speech["input_features"] = [mel.astype(np.float32) for mel in mel_spectrograms]
+
+ # convert attention_mask to correct format
+ attention_mask = padded_inputs.get("attention_mask")
+ if attention_mask is not None:
+ batched_speech["padding_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]
+
+ if return_noise:
+ noise = [
+ self.generate_noise(spectrogram.shape[0], generator)
+ for spectrogram in batched_speech["input_features"]
+ ]
+ batched_speech["noise_sequence"] = noise
+
+ if do_normalize:
+ batched_speech["input_features"] = [
+ self.normalize(spectrogram) for spectrogram in batched_speech["input_features"]
+ ]
+
+ if return_tensors is not None:
+ batched_speech = batched_speech.convert_to_tensors(return_tensors)
+
+ return batched_speech
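+ # Minimal usage sketch (comments only; the checkpoint name is an assumption based on the UnivNet docs
+ # elsewhere in this package):
+ #
+ #     extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")
+ #     inputs = extractor(waveform, sampling_rate=24000, return_tensors="pt")
+ #     # `inputs` holds "input_features", "noise_sequence" and "padding_mask"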
+
+ def to_dict(self) -> Dict[str, Any]:
+ output = super().to_dict()
+
+ # Don't serialize these as they are derived from the other properties.
+ names = ["window", "mel_filters", "n_fft", "n_freqs", "num_max_samples"]
+ for name in names:
+ if name in output:
+ del output[name]
+
+ return output
diff --git a/venv/lib/python3.10/site-packages/transformers/models/univnet/modeling_univnet.py b/venv/lib/python3.10/site-packages/transformers/models/univnet/modeling_univnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2551d7265319635b970089a143da5e98253121f
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/transformers/models/univnet/modeling_univnet.py
@@ -0,0 +1,634 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" PyTorch UnivNetModel model."""
+
+from dataclasses import dataclass
+from typing import Optional, Tuple, Union
+
+import torch
+import torch.utils.checkpoint
+from torch import nn
+
+from ...modeling_utils import ModelOutput, PreTrainedModel
+from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
+from .configuration_univnet import UnivNetConfig
+
+
+logger = logging.get_logger(__name__)
+
+# General docstring
+_CONFIG_FOR_DOC = "UnivNetConfig"
+
+_CHECKPOINT_FOR_DOC = "dg845/univnet-dev"
+
+
+from ..deprecated._archive_maps import UNIVNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
+
+
+@dataclass
+class UnivNetModelOutput(ModelOutput):
+ """
+ Output class for the [`UnivNetModel`], which includes the generated audio waveforms and the original unpadded
+ lengths of those waveforms (so that the padding can be removed by [`UnivNetModel.batch_decode`]).
+
+ Args:
+ waveforms (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
+ Batched 1D (mono-channel) output audio waveforms.
+ waveform_lengths (`torch.FloatTensor` of shape `(batch_size,)`):
+ The batched length in samples of each unpadded waveform in `waveforms`.
+ """
+
+ waveforms: torch.FloatTensor = None
+ waveform_lengths: torch.FloatTensor = None
+
+
+class UnivNetKernelPredictorResidualBlock(nn.Module):
+ """
+ Implementation of the residual block for the kernel predictor network inside each location variable convolution
+ block (LVCBlock).
+
+ Parameters:
+ config: (`UnivNetConfig`):
+ Config for the `UnivNetModel` model.
+ """
+
+ def __init__(
+ self,
+ config: UnivNetConfig,
+ ):
+ super().__init__()
+ self.channels = config.model_in_channels
+ self.kernel_size = config.kernel_predictor_conv_size
+ self.dropout_prob = config.kernel_predictor_dropout
+ self.leaky_relu_slope = config.leaky_relu_slope
+
+ padding = (self.kernel_size - 1) // 2
+
+ self.dropout = nn.Dropout(self.dropout_prob)
+ self.conv1 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)
+ self.conv2 = nn.Conv1d(self.channels, self.channels, self.kernel_size, padding=padding, bias=True)
+
+ def forward(self, hidden_states: torch.FloatTensor):
+ # hidden_states should have shape (batch_size, channels, seq_length)
+ residual = hidden_states
+ hidden_states = self.dropout(hidden_states)
+ hidden_states = self.conv1(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.conv2(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ return hidden_states + residual
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.conv1)
+ nn.utils.weight_norm(self.conv2)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.conv1)
+ nn.utils.remove_weight_norm(self.conv2)
+
+
+class UnivNetKernelPredictor(nn.Module):
+ """
+ Implementation of the kernel predictor network which supplies the kernel and bias for the location variable
+ convolutional layers (LVCs) in each UnivNet LVCBlock.
+
+ Based on the KernelPredictor implementation in
+ [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L7).
+
+ Parameters:
+ config: (`UnivNetConfig`):
+ Config for the `UnivNetModel` model.
+ conv_kernel_size (`int`, *optional*, defaults to 3):
+ The kernel size for the location variable convolutional layer kernels (convolutional weight tensor).
+ conv_layers (`int`, *optional*, defaults to 4):
+ The number of location variable convolutional layers to output kernels and biases for.
+ """
+
+ def __init__(
+ self,
+ config: UnivNetConfig,
+ conv_kernel_size: int = 3,
+ conv_layers: int = 4,
+ ):
+ super().__init__()
+
+ self.conv_in_channels = config.model_hidden_channels
+ self.conv_out_channels = 2 * config.model_hidden_channels
+ self.conv_kernel_size = conv_kernel_size
+ self.conv_layers = conv_layers
+
+ self.kernel_channels = (
+ self.conv_in_channels * self.conv_out_channels * self.conv_kernel_size * self.conv_layers
+ )
+ self.bias_channels = self.conv_out_channels * self.conv_layers
+
+ self.resnet_in_channels = config.num_mel_bins
+ self.resnet_hidden_channels = config.kernel_predictor_hidden_channels
+ self.resnet_kernel_size = config.kernel_predictor_conv_size
+ self.num_blocks = config.kernel_predictor_num_blocks
+
+ self.leaky_relu_slope = config.leaky_relu_slope
+
+ padding = (self.resnet_kernel_size - 1) // 2
+
+ self.input_conv = nn.Conv1d(self.resnet_in_channels, self.resnet_hidden_channels, 5, padding=2, bias=True)
+
+ self.resblocks = nn.ModuleList([UnivNetKernelPredictorResidualBlock(config) for _ in range(self.num_blocks)])
+
+ self.kernel_conv = nn.Conv1d(
+ self.resnet_hidden_channels, self.kernel_channels, self.resnet_kernel_size, padding=padding, bias=True
+ )
+ self.bias_conv = nn.Conv1d(
+ self.resnet_hidden_channels, self.bias_channels, self.resnet_kernel_size, padding=padding, bias=True
+ )
+
+ def forward(self, spectrogram: torch.FloatTensor):
+ """
+ Maps a conditioning log-mel spectrogram to a tensor of convolutional kernels and biases, for use in location
+ variable convolutional layers. Note that the input spectrogram should have shape (batch_size, input_channels,
+ seq_length).
+
+ Args:
+ spectrogram (`torch.FloatTensor` of shape `(batch_size, input_channels, seq_length)`):
+ Tensor containing the log-mel spectrograms.
+
+ Returns:
+ Tuple[`torch.FloatTensor`, `torch.FloatTensor`]: tuple of tensors where the first element is the tensor of
+ location variable convolution kernels of shape `(batch_size, self.conv_layers, self.conv_in_channels,
+ self.conv_out_channels, self.conv_kernel_size, seq_length)` and the second element is the tensor of
+ location variable convolution biases of shape `(batch_size, self.conv_layers, self.conv_out_channels,
+ seq_length)`.
+ """
+ batch_size, _, seq_length = spectrogram.shape
+
+ hidden_states = self.input_conv(spectrogram)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+
+ for resblock in self.resblocks:
+ hidden_states = resblock(hidden_states)
+
+ kernel_hidden_states = self.kernel_conv(hidden_states)
+ bias_hidden_states = self.bias_conv(hidden_states)
+
+ # Reshape kernels and biases to appropriate shape
+ kernels = kernel_hidden_states.view(
+ batch_size,
+ self.conv_layers,
+ self.conv_in_channels,
+ self.conv_out_channels,
+ self.conv_kernel_size,
+ seq_length,
+ ).contiguous()
+ biases = bias_hidden_states.view(
+ batch_size,
+ self.conv_layers,
+ self.conv_out_channels,
+ seq_length,
+ ).contiguous()
+
+ return kernels, biases
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.input_conv)
+ for layer in self.resblocks:
+ layer.apply_weight_norm()
+ nn.utils.weight_norm(self.kernel_conv)
+ nn.utils.weight_norm(self.bias_conv)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.input_conv)
+ for layer in self.resblocks:
+ layer.remove_weight_norm()
+ nn.utils.remove_weight_norm(self.kernel_conv)
+ nn.utils.remove_weight_norm(self.bias_conv)
+
+
+class UnivNetLvcResidualBlock(nn.Module):
+ """
+ Implementation of the location variable convolution (LVC) residual block for the UnivNet residual network.
+
+ Parameters:
+ config (`UnivNetConfig`):
+ Config for the `UnivNetModel` model.
+ kernel_size (`int`):
+ The kernel size for the dilated 1D convolutional layer.
+ dilation (`int`):
+ The dilation for the dilated 1D convolutional layer.
+ """
+
+ def __init__(
+ self,
+ config: UnivNetConfig,
+ kernel_size: int,
+ dilation: int,
+ ):
+ super().__init__()
+ self.hidden_channels = config.model_hidden_channels
+ self.kernel_size = kernel_size
+ self.dilation = dilation
+ self.leaky_relu_slope = config.leaky_relu_slope
+
+ padding = self.dilation * (self.kernel_size - 1) // 2
+
+ self.conv = nn.Conv1d(
+ self.hidden_channels,
+ self.hidden_channels,
+ self.kernel_size,
+ padding=padding,
+ dilation=self.dilation,
+ )
+
+ def forward(self, hidden_states, kernel, bias, hop_size=256):
+ residual = hidden_states
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.conv(hidden_states)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.location_variable_convolution(hidden_states, kernel, bias, hop_size=hop_size)
+ # Gated activation unit
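+ # The LVC kernels are predicted with 2 * hidden_channels output channels, so the first
+ # half is used as a sigmoid gate and the second half as a tanh filter.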
+ hidden_states = torch.sigmoid(hidden_states[:, : self.hidden_channels, :]) * torch.tanh(
+ hidden_states[:, self.hidden_channels :, :]
+ )
+ # Skip connection
+ hidden_states = residual + hidden_states
+
+ return hidden_states
+
+ # Based on https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L171
+ def location_variable_convolution(
+ self,
+ hidden_states: torch.FloatTensor,
+ kernel: torch.FloatTensor,
+ bias: torch.FloatTensor,
+ dilation: int = 1,
+ hop_size: int = 256,
+ ):
+ """
+ Performs location-variable convolution operation on the input sequence (hidden_states) using the local
+ convolution kernel. This was introduced in [LVCNet: Efficient Condition-Dependent Modeling Network for Waveform
+ Generation](https://arxiv.org/abs/2102.10815) by Zhen Zheng, Jianzong Wang, Ning Cheng, and Jing Xiao.
+
+ Time: 414 μs ± 309 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each), tested on an NVIDIA V100.
+
+ Args:
+ hidden_states (`torch.FloatTensor` of shape `(batch_size, in_channels, in_length)`):
+ The input sequence of shape (batch, in_channels, in_length).
+ kernel (`torch.FloatTensor` of shape `(batch_size, in_channels, out_channels, kernel_size, kernel_length)`):
+ The local convolution kernel of shape (batch, in_channels, out_channels, kernel_size, kernel_length).
+ bias (`torch.FloatTensor` of shape `(batch_size, out_channels, kernel_length)`):
+ The bias for the local convolution of shape (batch, out_channels, kernel_length).
+ dilation (`int`, *optional*, defaults to 1):
+ The dilation of convolution.
+ hop_size (`int`, *optional*, defaults to 256):
+ The hop_size of the conditioning sequence.
+ Returns:
+ `torch.FloatTensor`: the output sequence after performing local convolution with shape (batch_size,
+ out_channels, in_length).
+ """
+ batch, _, in_length = hidden_states.shape
+ batch, _, out_channels, kernel_size, kernel_length = kernel.shape
+ if in_length != (kernel_length * hop_size):
+ raise ValueError(
+ f"Dim 2 of `hidden_states` should be {kernel_length * hop_size} but got {in_length}. Please check"
+ " `hidden_states` or `kernel` and `hop_size` to make sure they are correct."
+ )
+
+ padding = dilation * int((kernel_size - 1) / 2)
+
+ # (batch, in_channels, in_length + 2*padding)
+ hidden_states = nn.functional.pad(hidden_states, (padding, padding), "constant", 0)
+ # (batch, in_channels, kernel_length, hop_size + 2*padding)
+ hidden_states = hidden_states.unfold(2, hop_size + 2 * padding, hop_size)
+
+ if hop_size < dilation:
+ hidden_states = nn.functional.pad(hidden_states, (0, dilation), "constant", 0)
+ # (batch, in_channels, kernel_length, (hop_size + 2*padding)/dilation, dilation)
+ hidden_states = hidden_states.unfold(3, dilation, dilation)
+ hidden_states = hidden_states[:, :, :, :, :hop_size]
+ # (batch, in_channels, kernel_length, dilation, (hop_size + 2*padding)/dilation)
+ hidden_states = hidden_states.transpose(3, 4)
+ # (batch, in_channels, kernel_length, dilation, _, kernel_size)
+ hidden_states = hidden_states.unfold(4, kernel_size, 1)
+
+ # Apply local convolution kernel to hidden_states.
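+ # einsum subscripts: b=batch, i=in_channels, o=out_channels, l=kernel_length, d=dilation,
+ # s=sliding positions within one hop (hop_size // dilation in the common case),
+ # k=kernel_size. Summing over i and k applies a different kernel at every frame l.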
+ output_hidden_states = torch.einsum("bildsk,biokl->bolsd", hidden_states, kernel)
+
+ output_hidden_states = output_hidden_states.to(memory_format=torch.channels_last_3d)
+ bias = bias.unsqueeze(-1).unsqueeze(-1).to(memory_format=torch.channels_last_3d)
+ output_hidden_states = output_hidden_states + bias
+ output_hidden_states = output_hidden_states.contiguous().view(batch, out_channels, -1)
+
+ return output_hidden_states
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.conv)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.conv)
+
+
+class UnivNetLvcBlock(nn.Module):
+ """
+ Implementation of the location variable convolution (LVC) block of the UnivNet residual network. Includes a
+ `UnivNetKernelPredictor` inside to predict the kernels and biases of the LVC layers.
+
+ Based on LVCBlock in
+ [maum-ai/univnet](https://github.com/maum-ai/univnet/blob/9bb2b54838bb6d7ce767131cc7b8b61198bc7558/model/lvcnet.py#L98)
+
+ Parameters:
+ config (`UnivNetConfig`):
+ Config for the `UnivNetModel` model.
+ layer_id (`int`):
+ An integer corresponding to the index of the current LVC resnet block layer. This should be between 0 and
+ `len(config.resblock_stride_sizes) - 1`, inclusive.
+ lvc_hop_size (`int`, *optional*, defaults to 256):
+ The hop size for the location variable convolutional layers.
+ """
+
+ def __init__(
+ self,
+ config: UnivNetConfig,
+ layer_id: int,
+ lvc_hop_size: int = 256,
+ ):
+ super().__init__()
+ self.hidden_channels = config.model_hidden_channels
+ self.kernel_size = config.resblock_kernel_sizes[layer_id]
+ self.stride = config.resblock_stride_sizes[layer_id]
+ self.dilations = config.resblock_dilation_sizes[layer_id]
+ self.cond_hop_length = lvc_hop_size
+ self.leaky_relu_slope = config.leaky_relu_slope
+ self.num_blocks = len(self.dilations)
+
+ self.convt_pre = nn.ConvTranspose1d(
+ self.hidden_channels,
+ self.hidden_channels,
+ 2 * self.stride,
+ stride=self.stride,
+ padding=self.stride // 2 + self.stride % 2,
+ output_padding=self.stride % 2,
+ )
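+ # With kernel_size = 2 * stride and the padding / output_padding chosen above, this
+ # transposed convolution upsamples the sequence length by exactly `stride`.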
+
+ self.kernel_predictor = UnivNetKernelPredictor(config, self.kernel_size, self.num_blocks)
+
+ self.resblocks = nn.ModuleList(
+ [UnivNetLvcResidualBlock(config, self.kernel_size, self.dilations[i]) for i in range(self.num_blocks)]
+ )
+
+ def forward(self, hidden_states: torch.FloatTensor, spectrogram: torch.FloatTensor):
+ # hidden_states: (batch_size, hidden_channels, seq_length)
+ # spectrogram: (batch_size, cond_channels, cond_length)
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.convt_pre(hidden_states)
+
+ kernels, biases = self.kernel_predictor(spectrogram)
+
+ for i, resblock in enumerate(self.resblocks):
+ kernel = kernels[:, i, :, :, :, :]
+ bias = biases[:, i, :, :]
+ hidden_states = resblock(hidden_states, kernel, bias, hop_size=self.cond_hop_length)
+
+ return hidden_states
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.convt_pre)
+ self.kernel_predictor.apply_weight_norm()
+ for layer in self.resblocks:
+ layer.apply_weight_norm()
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.convt_pre)
+ self.kernel_predictor.remove_weight_norm()
+ for layer in self.resblocks:
+ layer.remove_weight_norm()
+
+
+UNIVNET_START_DOCSTRING = r"""
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
+ etc.)
+
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
+ and behavior.
+
+ Parameters:
+ config ([`UnivNetConfig`]):
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
+ load the weights associated with the model, only the configuration. Check out the
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
+"""
+
+
+UNIVNET_INPUTS_DOCSTRING = r"""
+ Converts a noise waveform and a conditioning spectrogram to a speech waveform. Passing a batch of log-mel
+ spectrograms returns a batch of speech waveforms. Passing a single, un-batched log-mel spectrogram returns a
+ single, un-batched speech waveform.
+
+ Args:
+ input_features (`torch.FloatTensor`):
+ Tensor containing the log-mel spectrograms. Can be batched and of shape `(batch_size, sequence_length,
+ config.num_mel_bins)`, or un-batched and of shape `(sequence_length, config.num_mel_bins)`.
+ noise_sequence (`torch.FloatTensor`, *optional*):
+ Tensor containing a noise sequence of standard Gaussian noise. Can be batched and of shape `(batch_size,
+ sequence_length, config.model_in_channels)`, or un-batched and of shape `(sequence_length,
+ config.model_in_channels)`. If not supplied, will be randomly generated.
+ padding_mask (`torch.BoolTensor`, *optional*):
+ Mask indicating which parts of each sequence are padded. Mask values are selected in `[0, 1]`:
+
+ - 1 for tokens that are **not masked**
+ - 0 for tokens that are **masked**
+
+ The mask can be batched and of shape `(batch_size, sequence_length)` or un-batched and of shape
+ `(sequence_length,)`.
+ generator (`torch.Generator`, *optional*):
+ A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
+ deterministic.
+ return_dict (`bool`, *optional*):
+ Whether to return a [`~utils.ModelOutput`] subclass instead of a plain tuple.
+"""
+
+
+@add_start_docstrings(
+ """UnivNet GAN vocoder.""",
+ UNIVNET_START_DOCSTRING,
+)
+class UnivNetModel(PreTrainedModel):
+ config_class = UnivNetConfig
+ main_input_name = "input_features"
+
+ def __init__(self, config: UnivNetConfig):
+ super().__init__(config)
+
+ self.num_kernels = len(config.resblock_kernel_sizes)
+ self.leaky_relu_slope = config.leaky_relu_slope
+
+ self.conv_pre = nn.Conv1d(
+ config.model_in_channels,
+ config.model_hidden_channels,
+ kernel_size=7,
+ stride=1,
+ padding=3,
+ padding_mode="reflect",
+ )
+
+ # Initialize location-variable convolution ResNet Blocks.
+ num_layers = len(config.resblock_stride_sizes)
+ hop_length = 1
+ hop_lengths = []
+ for stride in config.resblock_stride_sizes:
+ hop_length = hop_length * stride
+ hop_lengths.append(hop_length)
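+ # hop_lengths is the running product of the strides, so each LVC block receives the hop
+ # size matching its cumulative upsampling factor (e.g. strides [8, 8, 4] give hop lengths
+ # [8, 64, 256], for a total upsampling factor of 256 samples per spectrogram frame).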
+
+ self.resblocks = nn.ModuleList(
+ [
+ UnivNetLvcBlock(
+ config,
+ layer_id=i,
+ lvc_hop_size=hop_lengths[i],
+ )
+ for i in range(num_layers)
+ ]
+ )
+
+ self.conv_post = nn.Conv1d(config.model_hidden_channels, 1, 7, padding=3, padding_mode="reflect")
+
+ # Initialize weights and apply final processing
+ self.post_init()
+
+ @add_start_docstrings_to_model_forward(UNIVNET_INPUTS_DOCSTRING)
+ @replace_return_docstrings(output_type=UnivNetModelOutput, config_class=_CONFIG_FOR_DOC)
+ def forward(
+ self,
+ input_features: torch.FloatTensor,
+ noise_sequence: Optional[torch.FloatTensor] = None,
+ padding_mask: Optional[torch.FloatTensor] = None,
+ generator: Optional[torch.Generator] = None,
+ return_dict: Optional[bool] = None,
+ ) -> Union[Tuple[torch.FloatTensor], UnivNetModelOutput]:
+ r"""
+ Returns:
+
+ Example:
+
+ ```python
+ >>> from transformers import UnivNetFeatureExtractor, UnivNetModel
+ >>> from datasets import load_dataset, Audio
+
+ >>> model = UnivNetModel.from_pretrained("dg845/univnet-dev")
+ >>> feature_extractor = UnivNetFeatureExtractor.from_pretrained("dg845/univnet-dev")
+
+ >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
+ >>> # Resample the audio to the feature extractor's sampling rate.
+ >>> ds = ds.cast_column("audio", Audio(sampling_rate=feature_extractor.sampling_rate))
+ >>> inputs = feature_extractor(
+ ... ds[0]["audio"]["array"], sampling_rate=ds[0]["audio"]["sampling_rate"], return_tensors="pt"
+ ... )
+ >>> audio = model(**inputs).waveforms
+ >>> list(audio.shape)
+ [1, 140288]
+ ```
+ """
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+ # Resolve batch sizes for noise_sequence and spectrogram
+ spectrogram_batched = input_features.dim() == 3
+ if not spectrogram_batched:
+ input_features = input_features.unsqueeze(0)
+ spectrogram_batch_size, spectrogram_length, _ = input_features.shape
+
+ if noise_sequence is not None:
+ noise_sequence_batched = noise_sequence.dim() == 3
+ if not noise_sequence_batched:
+ noise_sequence = noise_sequence.unsqueeze(0)
+ else:
+ # Randomly generate noise_sequence
+ noise_sequence_shape = (spectrogram_batch_size, spectrogram_length, self.config.model_in_channels)
+ noise_sequence = torch.randn(
+ noise_sequence_shape, generator=generator, dtype=input_features.dtype, device=input_features.device
+ )
+ noise_sequence_batch_size = noise_sequence.shape[0]
+
+ if spectrogram_batch_size > 1 and noise_sequence_batch_size == 1:
+ # Repeat noise_sequence spectrogram_batch_size times
+ noise_sequence = noise_sequence.repeat(spectrogram_batch_size, 1, 1)
+ elif noise_sequence_batch_size > 1 and spectrogram_batch_size == 1:
+ # Repeat spectrogram noise_sequence_batch_size times
+ input_features = input_features.repeat(noise_sequence_batch_size, 1, 1)
+
+ if noise_sequence_batch_size != spectrogram_batch_size:
+ raise ValueError(
+ f"The batch size of `noise_sequence` is {noise_sequence_batch_size} and the batch size of"
+ f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal."
+ )
+
+ if padding_mask is not None:
+ if padding_mask.dim() == 1:
+ padding_mask = padding_mask.unsqueeze(0)
+ padding_mask_batch_size = padding_mask.shape[0]
+ if padding_mask_batch_size != spectrogram_batch_size:
+ raise ValueError(
+ f"The batch size of `padding_mask` is {padding_mask_batch_size} and the batch size of"
+ f" `input_features` is {spectrogram_batch_size}, but the two are expected to be equal."
+ )
+
+ # Change shapes to have channels before sequence lengths
+ hidden_states = noise_sequence.transpose(2, 1)
+ input_features = input_features.transpose(2, 1)
+
+ hidden_states = self.conv_pre(hidden_states)
+
+ for resblock in self.resblocks:
+ hidden_states = resblock(hidden_states, input_features)
+
+ hidden_states = nn.functional.leaky_relu(hidden_states, self.leaky_relu_slope)
+ hidden_states = self.conv_post(hidden_states)
+ hidden_states = torch.tanh(hidden_states)
+
+ # Remove sequence length dimension since this collapses to 1
+ # NOTE: keep waveforms batched even if there's only one
+ waveform = hidden_states.squeeze(1)
+
+ # Get sequence lengths for UnivNetFeatureExtractor.batch_decode.
+ waveform_lengths = None
+ if padding_mask is not None:
+ # Padding is always contiguous and added on the right
+ waveform_lengths = torch.sum(padding_mask, dim=1)
+
+ if not return_dict:
+ outputs = (waveform, waveform_lengths)
+ return outputs
+
+ return UnivNetModelOutput(
+ waveforms=waveform,
+ waveform_lengths=waveform_lengths,
+ )
+
+ def _init_weights(self, module):
+ """Initialize the weights."""
+ if isinstance(module, (nn.Linear, nn.Conv1d, nn.ConvTranspose1d)):
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
+ if module.bias is not None:
+ module.bias.data.zero_()
+
+ def apply_weight_norm(self):
+ nn.utils.weight_norm(self.conv_pre)
+ for layer in self.resblocks:
+ layer.apply_weight_norm()
+ nn.utils.weight_norm(self.conv_post)
+
+ def remove_weight_norm(self):
+ nn.utils.remove_weight_norm(self.conv_pre)
+ for layer in self.resblocks:
+ layer.remove_weight_norm()
+ nn.utils.remove_weight_norm(self.conv_post)
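+
+
+ # Illustrative sketch (not executed here): trimming padding from a batched forward pass with
+ # the feature extractor's `batch_decode`, which the comment inside `forward` refers to. The
+ # exact arguments accepted by `batch_decode` are an assumption, not verified in this file:
+ #
+ #     outputs = model(input_features=padded_spectrograms, padding_mask=padding_mask)
+ #     audio = feature_extractor.batch_decode(outputs.waveforms, outputs.waveform_lengths)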