diff --git a/.gitattributes b/.gitattributes index e5fcbf388dc1bdd59c398e763da67351173b9f4b..6eb9f6fb579b291e3d39a9bfac02febfe7abb01d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -78,3 +78,6 @@ venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so fil venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..0dad8c361651c5a26442fffb197fbc238b957090 --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f39fab6ed5a3a54402fd9808347560095421fc2856329a72817c4e1a996aff56 +size 33555612 diff --git a/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..9bdb478352e101ede8d720ab5ab964f29d463756 --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18f3d314abd9a1d4e83088a29062f4a0b55412c39a441f31d779c6dc3c274434 +size 33555627 diff --git a/lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..d82be433abe592079dc9ce67ec7e97fe668c8590 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood @@ -0,0 +1 @@ +ffa6e39a35a16299dcb015f17f986aaa598ad8b4840c4cebe0339a7042232741 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..874256a0b8ae0c6fe4874498ecb9e73f383f0d60 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json @@ -0,0 +1 @@ +{"results": {"arithmetic_2da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_2da": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..b52790c74b649b455fd90ca93cc70ad23c3d129b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood @@ -0,0 +1 @@ +d3557beb8b9e5704122c2fc6362b11fbe2c3f2f3cb72aed4462b208767c40e01 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-res.json new file mode 100644 index 
0000000000000000000000000000000000000000..4321db2604d4ef8d992f587841264964acfb065f --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-res.json @@ -0,0 +1 @@ +{"results": {"arithmetic_4ds": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_4ds": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..276f03f76d1f76f242415e9cdeabf368c9a0f8ce --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_determiner_noun_agreement_with_adj_irregular_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adj_irregular_2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..efe40ced37f6a7890d247b0292e80d55dde1849c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_npi_present_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_npi_present_2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..321702a66e7f2a1e762a4f9b9ae4b99a6f813c3b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_only_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_licensor_present": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..43fadc2e0b0ea5cd762868a13629b85daec7f499 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_principle_A_c_command": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_c_command": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..a26cb174a06e1941ae79e137161d85c4f5814838 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood @@ -0,0 +1 @@ +973fe56534fdef1207f0fc08dd09a210304c55f33c6cbb17552754bf54f11c86 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..4b21da71d54c0a8fe09f204dd4a78f0841c6ae85 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json @@ -0,0 +1 @@ 
+{"results": {"blimp_wh_questions_subject_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_questions_subject_gap": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..13359ac3d2092bb8d38d44f17a125124c034d317 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-loglikelihood @@ -0,0 +1 @@ +a142cc2a6fcd93230b650927b07367cad957b8f3f42cb4072151da53dea301df \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/coqa-v0-greedy_until b/lm-evaluation-harness/tests/testdata/coqa-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..c1a9e165a7e42191745e38b1dd8d6b9e2fe609cb --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/coqa-v0-greedy_until @@ -0,0 +1 @@ +4a8605d5deed0423ec095700251ed93325b45d320aca35d4ce1e94702094435e \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..2f6455aec029fea8d7ee8fa866e9f7779ac99914 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-loglikelihood @@ -0,0 +1 @@ +8ba0a525c65f795c99f6416e70c998e75e4b6cc43bf9a4bd7ccacd3c3591e9cb \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json b/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..857af346b47d7ce11ee4192b928608a2111776f4 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json @@ -0,0 +1 @@ +{"results": {"ethics_utilitarianism": {"acc": 0.49771214642262895, "acc_stderr": 0.007211546310787838}}, "versions": {"ethics_utilitarianism": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..ed3332eddaf041c82908352c43cf8d9187b8f381 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-loglikelihood @@ -0,0 +1 @@ +cde76ba2c7382b4876e17136c94f52aca2774e50342ab757b2a2d18da370dcb6 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..ac80d178809ddbacc3aeb8ff368d8a68605a6430 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-loglikelihood @@ -0,0 +1 @@ +add45970ea3865be7c7a31f788a835949f6937ac73f699b122ca56a3431e95f8 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..bd4edd2394a4ccaf2d75c578a9f45ad614657dd8 --- /dev/null +++ 
b/lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-international_law": {"acc": 0.2396694214876033, "acc_norm": 0.3140495867768595, "acc_norm_stderr": 0.042369647530410164, "acc_stderr": 0.03896878985070417}}, "versions": {"hendrycksTest-international_law": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..48d49de8399fba6cfb50dd98d3cbcf8d39388ab2 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood @@ -0,0 +1 @@ +db6141246889a19dd3f6b9109f314d49c1a70f7a98795858804378b095c4a2fe \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..eac53bcf4a7610934b697d6d19f53ecdf5d4a4ad --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-medical_genetics": {"acc": 0.27, "acc_norm": 0.29, "acc_norm_stderr": 0.04560480215720684, "acc_stderr": 0.0446196043338474}}, "versions": {"hendrycksTest-medical_genetics": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..953fc3be48759378aea33eb767cb7367514a5de9 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-loglikelihood @@ -0,0 +1 @@ +d6ef028022c02b69d1516973e08bebaa14d8debcf2589a2bb124823178202d20 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..26ea1c2a75ccfb96af880ee30eef11520e9ea39c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-moral_disputes": {"acc": 0.24855491329479767, "acc_norm": 0.27167630057803466, "acc_norm_stderr": 0.023948512905468365, "acc_stderr": 0.023267528432100174}}, "versions": {"hendrycksTest-moral_disputes": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..ec9c1e79c117c88246fa596ca90821025c9786af --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-philosophy": {"acc": 0.26366559485530544, "acc_norm": 0.2733118971061093, "acc_norm_stderr": 0.02531176597542612, "acc_stderr": 0.02502553850053234}}, "versions": {"hendrycksTest-philosophy": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood new file mode 100644 index 
0000000000000000000000000000000000000000..eed85dbaf98e67cc5cd0876bc5f73f3f9fb186fb --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood @@ -0,0 +1 @@ +a1a338d0083a21054f74d36a296d6bd8e2e457327c0fd630bebcc61ed758044d \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..0004b194049a5dce0266002b4a19882fbb8c6bfa --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-virology": {"acc": 0.27710843373493976, "acc_norm": 0.2710843373493976, "acc_norm_stderr": 0.03460579907553027, "acc_stderr": 0.034843315926805875}}, "versions": {"hendrycksTest-virology": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..0fff75a7eaf2e0773a7e3dcda446f59a59dad878 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-world_religions": {"acc": 0.21637426900584794, "acc_norm": 0.22807017543859648, "acc_norm_stderr": 0.03218093795602357, "acc_stderr": 0.03158149539338734}}, "versions": {"hendrycksTest-world_religions": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..82921d1db066020f53d61c21d46498a512144b37 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-greedy_until @@ -0,0 +1 @@ +e94d310de91fad7ce36f4cf3305552020221482c5588f2efcefaa019893504f1 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..ead0e9ce5d9629dea9be37e521fb3a152ced8680 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada-v0-res.json @@ -0,0 +1 @@ +{"results": {"lambada": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..efd450a8f2a4ca067f7380af809fdda48d1ee465 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-loglikelihood @@ -0,0 +1 @@ +6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b652210ae3df4694785f6bfe6543909435122dee --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-res.json @@ -0,0 +1 @@ +{"results": {"lambada_mt_it": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_it": 0}} \ No newline at end of file diff --git 
a/lm-evaluation-harness/tests/testdata/lambada_openai-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..efd450a8f2a4ca067f7380af809fdda48d1ee465 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai-v0-loglikelihood @@ -0,0 +1 @@ +6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..684e35a4cf44f85a0dc5f82fc06fb2b4ebc90316 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-res.json @@ -0,0 +1 @@ +{"results": {"lambada_openai_mt_es": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_es": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json b/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..8ee1d031de8ec7d2af61c83567d433f9116ba24d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json @@ -0,0 +1 @@ +{"results": {"math_counting_and_prob": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_counting_and_prob": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until b/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..3ab10de26a038019a18699e20887de6da66981c4 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until @@ -0,0 +1 @@ +d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_num_theory-v0-greedy_until b/lm-evaluation-harness/tests/testdata/math_num_theory-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..82febb9f5dfeefbd6dc5d244574ac5666c6b8bba --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_num_theory-v0-greedy_until @@ -0,0 +1 @@ +b920ccb507afdcf3ef6f4c04891913731e9f32ec914801791c6d9f8abf6e1897 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..3fb242da3a2d274cbcc84bf86a6bb11f02df27ab --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood @@ -0,0 +1 @@ +3784acf322e79f31702a7a0612030e4ba5c4fc466ad976a34ee3f3d7278c01f0 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/mrpc-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/mrpc-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..95c849a153762819da1ce59c1b58a2013b97ef6a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/mrpc-v0-loglikelihood @@ -0,0 +1 @@ +9f54cbff8d6accba99cfa2c4c4b359563313941018173d7dcf9e32dc28c06583 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/mutual-v1-res.json b/lm-evaluation-harness/tests/testdata/mutual-v1-res.json new file mode 100644 index 
0000000000000000000000000000000000000000..42e97c6f1a1b65ef76cc3941c8b08e8ca836a59c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/mutual-v1-res.json @@ -0,0 +1 @@ +{"results": {"mutual": {"mrr": 0.5023513920240772, "mrr_stderr": 0.009501864812936679, "r@1": 0.22460496613995484, "r@1_stderr": 0.014028122493992806, "r@2": 0.4706546275395034, "r@2_stderr": 0.016778343895001414}}, "versions": {"mutual": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/mutual_plus-v0-res.json b/lm-evaluation-harness/tests/testdata/mutual_plus-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..9c0348826363dfe77e60cb28cf546110e25bab35 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/mutual_plus-v0-res.json @@ -0,0 +1 @@ +{"results": {"mutual_plus": {"mrr": 0.5275583145221953, "mrr_stderr": 0.009940894824430708, "r@1": 0.2595936794582393, "r@1_stderr": 0.014737047402750955, "r@2": 0.45372460496614, "r@2_stderr": 0.01673517854461967}}, "versions": {"mutual_plus": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_books3-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_books3-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..df19cd0a18f122d695f8aea4a717ab4dde79a987 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_books3-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_books3": {"bits_per_byte": 8.942486206275221e-07, "byte_perplexity": 1.0000008942490204, "word_perplexity": 1.0000052870063607}}, "versions": {"pile_books3": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_europarl-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_europarl-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..4c53edd2ce620475c056ccca5cde73380c246074 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_europarl-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_europarl": {"bits_per_byte": 8.648858203555344e-06, "byte_perplexity": 1.000008648895605, "word_perplexity": 1.000063506523818}}, "versions": {"pile_europarl": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..ead8d0b0bffe2da65e2602c7f2f352eeb404ef26 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_openwebtext2": {"bits_per_byte": 0.00012809520662477846, "byte_perplexity": 1.000128103411166, "word_perplexity": 1.0007951516532847}}, "versions": {"pile_openwebtext2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..dcf0e64cf0d4bf4fe719b8d349c1d36484d2047f --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling @@ -0,0 +1 @@ +e524bfb3e21cbdaddc117403a50df598520c7bf5b2c60ad8f2372cfa564e79be \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..ce041998635643ee17aace3105b227ef0746917e --- /dev/null +++ 
b/lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-loglikelihood_rolling @@ -0,0 +1 @@ +4eb69e314f0864ec8890e2323d7e76f8a8309692c4f090e2b41bf4be681a811d \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pubmedqa-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/pubmedqa-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..97db87ce2be9b3d2c08479ee73c7ba3923817795 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pubmedqa-v0-loglikelihood @@ -0,0 +1 @@ +7a04a1fb1d2b19db84fd15c224015d6c0306a41195a4e71fe6abd48fb4d53b9f \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/random_insertion-v0-greedy_until b/lm-evaluation-harness/tests/testdata/random_insertion-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..4844e5393b8358d225f516f1a948f1deccab7840 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/random_insertion-v0-greedy_until @@ -0,0 +1 @@ +6c48baa6924f3635120f33062251c4b571b3d4e9fe46b14d91f54ddd1c857997 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/record-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/record-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..a54fa05cd1ac551a973ff8155ddca6d868a49b42 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/record-v0-loglikelihood @@ -0,0 +1 @@ +a3e378fbde4e28f375cac1561bbfc7d7673c2af193628a774ad012d5192393aa \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/sciq-v0-res.json b/lm-evaluation-harness/tests/testdata/sciq-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..7071515827af18b10a7b3607e6249ed3e7c1929e --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/sciq-v0-res.json @@ -0,0 +1 @@ +{"results": {"sciq": {"acc": 0.234, "acc_norm": 0.239, "acc_norm_stderr": 0.01349300044693758, "acc_stderr": 0.01339490288966001}}, "versions": {"sciq": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/squad2-v1-loglikelihood b/lm-evaluation-harness/tests/testdata/squad2-v1-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..2c970f7583b3d8236d9ca2e802ce6e0403b36074 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/squad2-v1-loglikelihood @@ -0,0 +1 @@ +f5da6173402b274dc89130755c222c6ca6b2a3bacaaa4e4ab07be9322b7bad65 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-res.json b/lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..c1b1854c2e0abc9c4fe8096b4d45004bcc1a381b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/truthfulqa_mc-v1-res.json @@ -0,0 +1 @@ +{"results": {"truthfulqa_mc": {"mc1": 0.23255813953488372, "mc1_stderr": 0.01478915753108052, "mc2": 0.4462325560722362, "mc2_stderr": 0.004986523944692003}}, "versions": {"truthfulqa_mc": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wikitext-v0-res.json b/lm-evaluation-harness/tests/testdata/wikitext-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..9ac0c37bb5aa8cdde37bf84c61a0d020c8a03900 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wikitext-v0-res.json @@ -0,0 +1 @@ +{"results": {"wikitext": {"bits_per_byte": 2.219817611605802e-05, "byte_perplexity": 1.0000221984224973, "word_perplexity": 1.000118710696617}}, "versions": {"wikitext": 0}} \ No 
newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-de-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-de-en-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..c02fb9875d5354fdb0892b7493a822ee4af9d6c2 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-de-en-v0-greedy_until @@ -0,0 +1 @@ +d13b5a6915ca86ac6c6ebc50d9be0d0be3dfca600c12e896df53190d875de74d \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..9777002c79830918a3939ec6978d606ae967ffe6 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-greedy_until @@ -0,0 +1 @@ +7fe61f5847a51e93e97c84b39f4420978727754e4b6cf636a27851c615857530 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..57bad300d7f05b7eb0908ad655b0fc90a2bb9c26 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-en-ja": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 4.1308658294778584e-05, "chrf_stderr": 2.0456539027807417e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ja": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v1-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v1-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..9777002c79830918a3939ec6978d606ae967ffe6 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-ja-v1-greedy_until @@ -0,0 +1 @@ +7fe61f5847a51e93e97c84b39f4420978727754e4b6cf636a27851c615857530 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b6d0c71ad7c140790040022c51970dea075ab0b1 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-ru-en-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-ru-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.013344639906399232, "chrf_stderr": 7.583552652374546e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-ru-en": 0}} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 b/venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 new file mode 100644 index 0000000000000000000000000000000000000000..5db69fe303824544f2919fb711fe74ffd57f4441 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7380496d2fddf51b9d1fd0671117c8ab07f2cae6b2b0dc0186a46cabb8e7859 +size 19654024 diff --git a/venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so b/venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so new file mode 100644 index 0000000000000000000000000000000000000000..901a57ce0b9d76676380da75fcd69eefc7056b71 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14a8722d7ed23ef60665772b3ab9f095e58981061c49925ed0f58a7dfa529d8c +size 2845200 diff --git 
a/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz new file mode 100644 index 0000000000000000000000000000000000000000..14b961bd7fd75717fe055182f368148e2b327f39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.test1.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:90bc66c0c547b5fce91ea073b458df63f3d30a5a1a5e97c7abd0c30eccdbf169 +size 323 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz new file mode 100644 index 0000000000000000000000000000000000000000..d743adc079ea1c06583e20e8e1dba371af4d260a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.testDate1900.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a167fb78147766d380f75393bdd790258a2cd5a9f9ea6b06b09c2118e6a7de5a +size 182453 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.jsn.gz b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.jsn.gz new file mode 100644 index 0000000000000000000000000000000000000000..4614e5bc91ea893674d4217c686f3783a019cd53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/decimal.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91313261d3c3000485517f0d8b2af2e6644317e63e2ecae14808dbbb8e779af0 +size 19313 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.some-named-index.parquet b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.some-named-index.parquet new file mode 100644 index 0000000000000000000000000000000000000000..cc4ac04df23cf058f505abd4c6ffd75464b81d6c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.some-named-index.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5468128ea8a1091b5d07195471f3f9b3705247b69440aba45be6c68092dffc76 +size 4008 diff --git a/venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so b/venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so new file mode 100644 index 0000000000000000000000000000000000000000..31f9fd4fc7aeb31adca8529c15314c073f876731 --- /dev/null +++ b/venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a07646db6cdda85cca1d5170434c87be127895cdde4d45a3704578eab2b4f53 +size 85435777 diff --git a/venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2def7a5e0e2f8855744af38dac734fdbfc704969 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/beit/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert/configuration_bert.py b/venv/lib/python3.10/site-packages/transformers/models/bert/configuration_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..e692f8284c2bac09fc441f8a8f5a13ecab099819 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bert/configuration_bert.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# 
Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
+# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" BERT model configuration"""
+from collections import OrderedDict
+from typing import Mapping
+
+from ...configuration_utils import PretrainedConfig
+from ...onnx import OnnxConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+
+from ..deprecated._archive_maps import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP  # noqa: F401, E402
+
+
+class BertConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
+    instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
+    configuration with the defaults will yield a similar configuration to that of the BERT
+    [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information.
+
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 30522):
+            Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
+            `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`].
+        hidden_size (`int`, *optional*, defaults to 768):
+            Dimensionality of the encoder layers and the pooler layer.
+        num_hidden_layers (`int`, *optional*, defaults to 12):
+            Number of hidden layers in the Transformer encoder.
+        num_attention_heads (`int`, *optional*, defaults to 12):
+            Number of attention heads for each attention layer in the Transformer encoder.
+        intermediate_size (`int`, *optional*, defaults to 3072):
+            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
+        hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
+            The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
+            `"relu"`, `"silu"` and `"gelu_new"` are supported.
+        hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
+        attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
+            The dropout ratio for the attention probabilities.
+        max_position_embeddings (`int`, *optional*, defaults to 512):
+            The maximum sequence length that this model might ever be used with. Typically set this to something large
+            just in case (e.g., 512 or 1024 or 2048).
+        type_vocab_size (`int`, *optional*, defaults to 2):
+            The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
+        initializer_range (`float`, *optional*, defaults to 0.02):
+            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+        layer_norm_eps (`float`, *optional*, defaults to 1e-12):
+            The epsilon used by the layer normalization layers.
+        position_embedding_type (`str`, *optional*, defaults to `"absolute"`):
+            Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For
+            positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to
+            [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155).
+            For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models
+            with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658).
+        is_decoder (`bool`, *optional*, defaults to `False`):
+            Whether the model is used as a decoder or not. If `False`, the model is used as an encoder.
+        use_cache (`bool`, *optional*, defaults to `True`):
+            Whether or not the model should return the last key/values attentions (not used by all models). Only
+            relevant if `config.is_decoder=True`.
+        classifier_dropout (`float`, *optional*):
+            The dropout ratio for the classification head.
+
+    Examples:
+
+    ```python
+    >>> from transformers import BertConfig, BertModel
+
+    >>> # Initializing a BERT google-bert/bert-base-uncased style configuration
+    >>> configuration = BertConfig()
+
+    >>> # Initializing a model (with random weights) from the google-bert/bert-base-uncased style configuration
+    >>> model = BertModel(configuration)
+
+    >>> # Accessing the model configuration
+    >>> configuration = model.config
+    ```"""
+
+    model_type = "bert"
+
+    def __init__(
+        self,
+        vocab_size=30522,
+        hidden_size=768,
+        num_hidden_layers=12,
+        num_attention_heads=12,
+        intermediate_size=3072,
+        hidden_act="gelu",
+        hidden_dropout_prob=0.1,
+        attention_probs_dropout_prob=0.1,
+        max_position_embeddings=512,
+        type_vocab_size=2,
+        initializer_range=0.02,
+        layer_norm_eps=1e-12,
+        pad_token_id=0,
+        position_embedding_type="absolute",
+        use_cache=True,
+        classifier_dropout=None,
+        **kwargs,
+    ):
+        super().__init__(pad_token_id=pad_token_id, **kwargs)
+
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.type_vocab_size = type_vocab_size
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.position_embedding_type = position_embedding_type
+        self.use_cache = use_cache
+        self.classifier_dropout = classifier_dropout
+
+
+class BertOnnxConfig(OnnxConfig):
+    @property
+    def inputs(self) -> Mapping[str, Mapping[int, str]]:
+        if self.task == "multiple-choice":
+            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
+        else:
+            dynamic_axis = {0: "batch", 1: "sequence"}
+        return OrderedDict(
+            [
+                ("input_ids", dynamic_axis),
+                ("attention_mask", dynamic_axis),
+                ("token_type_ids", dynamic_axis),
+            ]
+        )
diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py b/venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py
new file mode 100644
index 0000000000000000000000000000000000000000..f7cb149053a3d06a8b8fc1bcc2bc8729c9213771
--- /dev/null
+++
b/venv/lib/python3.10/site-packages/transformers/models/bert/convert_bert_pytorch_checkpoint_to_original_tf.py @@ -0,0 +1,112 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Convert Huggingface Pytorch checkpoint to Tensorflow checkpoint.""" + +import argparse +import os + +import numpy as np +import tensorflow as tf +import torch + +from transformers import BertModel + + +def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str): + """ + Args: + model: BertModel Pytorch model instance to be converted + ckpt_dir: Tensorflow model directory + model_name: model name + + Currently supported HF models: + + - Y BertModel + - N BertForMaskedLM + - N BertForPreTraining + - N BertForMultipleChoice + - N BertForNextSentencePrediction + - N BertForSequenceClassification + - N BertForQuestionAnswering + """ + + tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value") + + var_map = ( + ("layer.", "layer_"), + ("word_embeddings.weight", "word_embeddings"), + ("position_embeddings.weight", "position_embeddings"), + ("token_type_embeddings.weight", "token_type_embeddings"), + (".", "/"), + ("LayerNorm/weight", "LayerNorm/gamma"), + ("LayerNorm/bias", "LayerNorm/beta"), + ("weight", "kernel"), + ) + + if not os.path.isdir(ckpt_dir): + os.makedirs(ckpt_dir) + + state_dict = model.state_dict() + + def to_tf_var_name(name: str): + for patt, repl in iter(var_map): + name = name.replace(patt, repl) + return f"bert/{name}" + + def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session): + tf_dtype = tf.dtypes.as_dtype(tensor.dtype) + tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer()) + session.run(tf.variables_initializer([tf_var])) + session.run(tf_var) + return tf_var + + tf.reset_default_graph() + with tf.Session() as session: + for var_name in state_dict: + tf_name = to_tf_var_name(var_name) + torch_tensor = state_dict[var_name].numpy() + if any(x in var_name for x in tensors_to_transpose): + torch_tensor = torch_tensor.T + tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session) + tf_var.assign(tf.cast(torch_tensor, tf_var.dtype)) + tf_weight = session.run(tf_var) + print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}") + + saver = tf.train.Saver(tf.trainable_variables()) + saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt")) + + +def main(raw_args=None): + parser = argparse.ArgumentParser() + parser.add_argument("--model_name", type=str, required=True, help="model name e.g. 
google-bert/bert-base-uncased") + parser.add_argument( + "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model" + ) + parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/.bin") + parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model") + args = parser.parse_args(raw_args) + + model = BertModel.from_pretrained( + pretrained_model_name_or_path=args.model_name, + state_dict=torch.load(args.pytorch_model_path), + cache_dir=args.cache_dir, + ) + + convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name) + + +if __name__ == "__main__": + main() diff --git a/venv/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..f48977728470299d7613a994c24b8b5f992e33bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bert/tokenization_bert_fast.py @@ -0,0 +1,172 @@ +# coding=utf-8 +# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Fast Tokenization classes for Bert.""" + +import json +from typing import List, Optional, Tuple + +from tokenizers import normalizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_bert import BertTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} + + +class BertTokenizerFast(PreTrainedTokenizerFast): + r""" + Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. 
+ cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + clean_text (`bool`, *optional*, defaults to `True`): + Whether or not to clean the text before tokenization by removing any control characters and replacing all + whitespaces by the classic one. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this + issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): + The prefix for subwords. + """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = BertTokenizer + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs, + ): + super().__init__( + vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) + if ( + normalizer_state.get("lowercase", do_lower_case) != do_lower_case + or normalizer_state.get("strip_accents", strip_accents) != strip_accents + or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars + ): + normalizer_class = getattr(normalizers, normalizer_state.pop("type")) + normalizer_state["lowercase"] = do_lower_case + normalizer_state["strip_accents"] = strip_accents + normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars + self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) + + self.do_lower_case = do_lower_case + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A BERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + + if token_ids_1 is not None: + output += token_ids_1 + [self.sep_token_id] + + return output + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..043caf7b7526fc6e70e7675363b20160612d01c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__init__.py @@ -0,0 +1,71 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, + is_vision_available, +) + + +_import_structure = { + "configuration_mobilevitv2": [ + "MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP", + "MobileViTV2Config", + "MobileViTV2OnnxConfig", + ], +} + + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_mobilevitv2"] = [ + "MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST", + "MobileViTV2ForImageClassification", + "MobileViTV2ForSemanticSegmentation", + "MobileViTV2Model", + "MobileViTV2PreTrainedModel", + ] + +if TYPE_CHECKING: + from .configuration_mobilevitv2 import ( + MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP, + MobileViTV2Config, + MobileViTV2OnnxConfig, + ) + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_mobilevitv2 import ( + MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, + MobileViTV2ForImageClassification, + MobileViTV2ForSemanticSegmentation, + MobileViTV2Model, + MobileViTV2PreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac48c1bcb2279d6ae2012d9e9e198ed93a54c6f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/configuration_mobilevitv2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/configuration_mobilevitv2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f9837e43cacc891077e7f317018d2c6f585992f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/configuration_mobilevitv2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..922f38862955c09c679b364e894b094d1203e1ed Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/convert_mlcvnets_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e839b02659601515e4d6865a15737d36f5b3a7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/__pycache__/modeling_mobilevitv2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/configuration_mobilevitv2.py 
b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/configuration_mobilevitv2.py new file mode 100644 index 0000000000000000000000000000000000000000..f8f1be141b52bd196424801a89e912432af76987 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/configuration_mobilevitv2.py @@ -0,0 +1,168 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" MobileViTV2 model configuration""" + +from collections import OrderedDict +from typing import Mapping + +from packaging import version + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import MOBILEVITV2_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class MobileViTV2Config(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`MobileViTV2Model`]. It is used to instantiate a + MobileViTV2 model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the MobileViTV2 + [apple/mobilevitv2-1.0](https://huggingface.co/apple/mobilevitv2-1.0) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + image_size (`int`, *optional*, defaults to 256): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 2): + The size (resolution) of each patch. + expand_ratio (`float`, *optional*, defaults to 2.0): + Expansion factor for the MobileNetv2 layers. + hidden_act (`str` or `function`, *optional*, defaults to `"swish"`): + The non-linear activation function (function or string) in the Transformer encoder and convolution layers. + conv_kernel_size (`int`, *optional*, defaults to 3): + The size of the convolutional kernel in the MobileViTV2 layer. + output_stride (`int`, *optional*, defaults to 32): + The ratio of the spatial resolution of the output to the resolution of the input image. + classifier_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for attached classifiers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + aspp_out_channels (`int`, *optional*, defaults to 512): + Number of output channels used in the ASPP layer for semantic segmentation. + atrous_rates (`List[int]`, *optional*, defaults to `[6, 12, 18]`): + Dilation (atrous) factors used in the ASPP layer for semantic segmentation. 
+ aspp_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the ASPP layer for semantic segmentation. + semantic_loss_ignore_index (`int`, *optional*, defaults to 255): + The index that is ignored by the loss function of the semantic segmentation model. + n_attn_blocks (`List[int]`, *optional*, defaults to `[2, 4, 3]`): + The number of attention blocks in each MobileViTV2Layer + base_attn_unit_dims (`List[int]`, *optional*, defaults to `[128, 192, 256]`): + The base multiplier for dimensions of attention blocks in each MobileViTV2Layer + width_multiplier (`float`, *optional*, defaults to 1.0): + The width multiplier for MobileViTV2. + ffn_multiplier (`int`, *optional*, defaults to 2): + The FFN multiplier for MobileViTV2. + attn_dropout (`float`, *optional*, defaults to 0.0): + The dropout in the attention layer. + ffn_dropout (`float`, *optional*, defaults to 0.0): + The dropout between FFN layers. + + Example: + + ```python + >>> from transformers import MobileViTV2Config, MobileViTV2Model + + >>> # Initializing a mobilevitv2-small style configuration + >>> configuration = MobileViTV2Config() + + >>> # Initializing a model from the mobilevitv2-small style configuration + >>> model = MobileViTV2Model(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "mobilevitv2" + + def __init__( + self, + num_channels=3, + image_size=256, + patch_size=2, + expand_ratio=2.0, + hidden_act="swish", + conv_kernel_size=3, + output_stride=32, + classifier_dropout_prob=0.1, + initializer_range=0.02, + layer_norm_eps=1e-5, + aspp_out_channels=512, + atrous_rates=[6, 12, 18], + aspp_dropout_prob=0.1, + semantic_loss_ignore_index=255, + n_attn_blocks=[2, 4, 3], + base_attn_unit_dims=[128, 192, 256], + width_multiplier=1.0, + ffn_multiplier=2, + attn_dropout=0.0, + ffn_dropout=0.0, + **kwargs, + ): + super().__init__(**kwargs) + + self.num_channels = num_channels + self.image_size = image_size + self.patch_size = patch_size + self.expand_ratio = expand_ratio + self.hidden_act = hidden_act + self.conv_kernel_size = conv_kernel_size + self.output_stride = output_stride + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.n_attn_blocks = n_attn_blocks + self.base_attn_unit_dims = base_attn_unit_dims + self.width_multiplier = width_multiplier + self.ffn_multiplier = ffn_multiplier + self.ffn_dropout = ffn_dropout + self.attn_dropout = attn_dropout + self.classifier_dropout_prob = classifier_dropout_prob + + # decode head attributes for semantic segmentation + self.aspp_out_channels = aspp_out_channels + self.atrous_rates = atrous_rates + self.aspp_dropout_prob = aspp_dropout_prob + self.semantic_loss_ignore_index = semantic_loss_ignore_index + + +class MobileViTV2OnnxConfig(OnnxConfig): + torch_onnx_minimum_version = version.parse("1.11") + + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + return OrderedDict([("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"})]) + + @property + def outputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "image-classification": + return OrderedDict([("logits", {0: "batch"})]) + else: + return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})]) + + @property + def atol_for_validation(self) -> float: + return 1e-4 diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py 
b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..2e2d31295d7c58fa7c75cff883cfc0815ffa6cb5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/convert_mlcvnets_to_pytorch.py @@ -0,0 +1,326 @@ +# coding=utf-8 +# Copyright 2023 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert MobileViTV2 checkpoints from the ml-cvnets library.""" + + +import argparse +import collections +import json +from pathlib import Path + +import requests +import torch +import yaml +from huggingface_hub import hf_hub_download +from PIL import Image + +from transformers import ( + MobileViTImageProcessor, + MobileViTV2Config, + MobileViTV2ForImageClassification, + MobileViTV2ForSemanticSegmentation, +) +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + + +def load_orig_config_file(orig_cfg_file): + print("Loading config file...") + + def flatten_yaml_as_dict(d, parent_key="", sep="."): + items = [] + for k, v in d.items(): + new_key = parent_key + sep + k if parent_key else k + if isinstance(v, collections.abc.MutableMapping): + items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + config = argparse.Namespace() + with open(orig_cfg_file, "r") as yaml_file: + try: + cfg = yaml.load(yaml_file, Loader=yaml.FullLoader) + + flat_cfg = flatten_yaml_as_dict(cfg) + for k, v in flat_cfg.items(): + setattr(config, k, v) + except yaml.YAMLError as exc: + logger.error("Error while loading config file: {}. 
Error message: {}".format(orig_cfg_file, str(exc))) + return config + + +def get_mobilevitv2_config(task_name, orig_cfg_file): + config = MobileViTV2Config() + + is_segmentation_model = False + + # dataset + if task_name.startswith("imagenet1k_"): + config.num_labels = 1000 + if int(task_name.strip().split("_")[-1]) == 384: + config.image_size = 384 + else: + config.image_size = 256 + filename = "imagenet-1k-id2label.json" + elif task_name.startswith("imagenet21k_to_1k_"): + config.num_labels = 21000 + if int(task_name.strip().split("_")[-1]) == 384: + config.image_size = 384 + else: + config.image_size = 256 + filename = "imagenet-22k-id2label.json" + elif task_name.startswith("ade20k_"): + config.num_labels = 151 + config.image_size = 512 + filename = "ade20k-id2label.json" + is_segmentation_model = True + elif task_name.startswith("voc_"): + config.num_labels = 21 + config.image_size = 512 + filename = "pascal-voc-id2label.json" + is_segmentation_model = True + + # orig_config + orig_config = load_orig_config_file(orig_cfg_file) + assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model" + config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0) + assert ( + getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d" + ), "Norm layers other than layer_norm_2d is not supported" + config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish") + # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) + + if is_segmentation_model: + config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16) + if "_deeplabv3" in task_name: + config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36]) + config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512) + config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1) + + # id2label + repo_id = "huggingface/label-files" + id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) + id2label = {int(k): v for k, v in id2label.items()} + config.id2label = id2label + config.label2id = {v: k for k, v in id2label.items()} + + return config + + +def rename_key(dct, old, new): + val = dct.pop(old) + dct[new] = val + + +def create_rename_keys(state_dict, base_model=False): + if base_model: + model_prefix = "" + else: + model_prefix = "mobilevitv2." + + rename_keys = [] + for k in state_dict.keys(): + if k[:8] == "encoder.": + k_new = k[8:] + else: + k_new = k + + if ".block." in k: + k_new = k_new.replace(".block.", ".") + if ".conv." in k: + k_new = k_new.replace(".conv.", ".convolution.") + if ".norm." in k: + k_new = k_new.replace(".norm.", ".normalization.") + + if "conv_1." in k: + k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.") + for i in [1, 2]: + if f"layer_{i}." in k: + k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.") + if ".exp_1x1." in k: + k_new = k_new.replace(".exp_1x1.", ".expand_1x1.") + if ".red_1x1." in k: + k_new = k_new.replace(".red_1x1.", ".reduce_1x1.") + + for i in [3, 4, 5]: + if f"layer_{i}.0." in k: + k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.") + if f"layer_{i}.1.local_rep.0." 
in k: + k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.") + if f"layer_{i}.1.local_rep.1." in k: + k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.") + + for i in [3, 4, 5]: + if i == 3: + j_in = [0, 1] + elif i == 4: + j_in = [0, 1, 2, 3] + elif i == 5: + j_in = [0, 1, 2] + + for j in j_in: + if f"layer_{i}.1.global_rep.{j}." in k: + k_new = k_new.replace( + f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}." + ) + if f"layer_{i}.1.global_rep.{j+1}." in k: + k_new = k_new.replace( + f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm." + ) + + if f"layer_{i}.1.conv_proj." in k: + k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.") + + if "pre_norm_attn.0." in k: + k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.") + if "pre_norm_attn.1." in k: + k_new = k_new.replace("pre_norm_attn.1.", "attention.") + if "pre_norm_ffn.0." in k: + k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.") + if "pre_norm_ffn.1." in k: + k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.") + if "pre_norm_ffn.3." in k: + k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.") + + if "classifier.1." in k: + k_new = k_new.replace("classifier.1.", "classifier.") + + if "seg_head." in k: + k_new = k_new.replace("seg_head.", "segmentation_head.") + if ".aspp_layer." in k: + k_new = k_new.replace(".aspp_layer.", ".") + if ".aspp_pool." in k: + k_new = k_new.replace(".aspp_pool.", ".") + + rename_keys.append((k, k_new)) + return rename_keys + + +def remove_unused_keys(state_dict): + """remove unused keys (e.g.: seg_head.aux_head)""" + keys_to_ignore = [] + for k in state_dict.keys(): + if k.startswith("seg_head.aux_head."): + keys_to_ignore.append(k) + for k in keys_to_ignore: + state_dict.pop(k, None) + + +# We will verify our results on an image of cute cats +def prepare_img(): + url = "http://images.cocodataset.org/val2017/000000039769.jpg" + # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" + im = Image.open(requests.get(url, stream=True).raw) + return im + + +@torch.no_grad() +def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path): + """ + Copy/paste/tweak model's weights to our MobileViTV2 structure. 
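+
+    Example (an illustrative call; the checkpoint and config paths below are placeholders, not real files):
+
+    ```python
+    convert_mobilevitv2_checkpoint(
+        task_name="imagenet1k_256",
+        checkpoint_path="/path/to/mobilevitv2-1.0.pt",
+        orig_config_path="/path/to/mobilevitv2-1.0.yaml",
+        pytorch_dump_folder_path="./mobilevitv2-1.0-imagenet1k-256",
+    )
+    ```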
+ """ + config = get_mobilevitv2_config(task_name, orig_config_path) + + # load original state_dict + checkpoint = torch.load(checkpoint_path, map_location="cpu") + + # load huggingface model + if task_name.startswith("ade20k_") or task_name.startswith("voc_"): + model = MobileViTV2ForSemanticSegmentation(config).eval() + base_model = False + else: + model = MobileViTV2ForImageClassification(config).eval() + base_model = False + + # remove and rename some keys of load the original model + state_dict = checkpoint + remove_unused_keys(state_dict) + rename_keys = create_rename_keys(state_dict, base_model=base_model) + for rename_key_src, rename_key_dest in rename_keys: + rename_key(state_dict, rename_key_src, rename_key_dest) + + # load modified state_dict + model.load_state_dict(state_dict) + + # Check outputs on an image, prepared by MobileViTImageProcessor + image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32) + encoding = image_processor(images=prepare_img(), return_tensors="pt") + outputs = model(**encoding) + + # verify classification model + if task_name.startswith("imagenet"): + logits = outputs.logits + predicted_class_idx = logits.argmax(-1).item() + print("Predicted class:", model.config.id2label[predicted_class_idx]) + if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0: + # expected_logits for base variant + expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]) + assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4) + + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + print(f"Saving model {task_name} to {pytorch_dump_folder_path}") + model.save_pretrained(pytorch_dump_folder_path) + print(f"Saving image processor to {pytorch_dump_folder_path}") + image_processor.save_pretrained(pytorch_dump_folder_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--task", + default="imagenet1k_256", + type=str, + help=( + "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " + """ + Classification (ImageNet-1k) + - MobileViTV2 (256x256) : imagenet1k_256 + - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384 + - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) : + imagenet21k_to_1k_256 + - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on + ImageNet-1k 384x384) : imagenet21k_to_1k_384 + Segmentation + - ADE20K Dataset : ade20k_deeplabv3 + - Pascal VOC 2012 Dataset: voc_deeplabv3 + """ + ), + choices=[ + "imagenet1k_256", + "imagenet1k_384", + "imagenet21k_to_1k_256", + "imagenet21k_to_1k_384", + "ade20k_deeplabv3", + "voc_deeplabv3", + ], + ) + + parser.add_argument( + "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." + ) + parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") + parser.add_argument( + "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." 
+ ) + + args = parser.parse_args() + convert_mobilevitv2_checkpoint( + args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py new file mode 100644 index 0000000000000000000000000000000000000000..c6c446b1862adc8487cd16aad3c725ac0896e205 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/mobilevitv2/modeling_mobilevitv2.py @@ -0,0 +1,1030 @@ +# coding=utf-8 +# Copyright 2023 Apple Inc. and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Original license: https://github.com/apple/ml-cvnets/blob/main/LICENSE +""" PyTorch MobileViTV2 model.""" + + +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_outputs import ( + BaseModelOutputWithNoAttention, + BaseModelOutputWithPoolingAndNoAttention, + ImageClassifierOutputWithNoAttention, + SemanticSegmenterOutput, +) +from ...modeling_utils import PreTrainedModel +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_mobilevitv2 import MobileViTV2Config + + +logger = logging.get_logger(__name__) + + +# General docstring +_CONFIG_FOR_DOC = "MobileViTV2Config" + +# Base docstring +_CHECKPOINT_FOR_DOC = "apple/mobilevitv2-1.0-imagenet1k-256" +_EXPECTED_OUTPUT_SHAPE = [1, 512, 8, 8] + +# Image classification docstring +_IMAGE_CLASS_CHECKPOINT = "apple/mobilevitv2-1.0-imagenet1k-256" +_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat" + + +from ..deprecated._archive_maps import MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.make_divisible +def make_divisible(value: int, divisor: int = 8, min_value: Optional[int] = None) -> int: + """ + Ensure that all layers have a channel count that is divisible by `divisor`. This function is taken from the + original TensorFlow repo. It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + """ + if min_value is None: + min_value = divisor + new_value = max(min_value, int(value + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
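+    # For example, make_divisible(30) rounds 30 to 32 (the nearest multiple of 8), which already passes the
+    # check below. make_divisible(90, divisor=64) first rounds down to 64; since 64 < 0.9 * 90, the guard
+    # below bumps the result back up to 64 + 64 = 128.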
+ if new_value < 0.9 * value: + new_value += divisor + return int(new_value) + + +def clip(value: float, min_val: float = float("-inf"), max_val: float = float("inf")) -> float: + return max(min_val, min(max_val, value)) + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTConvLayer with MobileViT->MobileViTV2 +class MobileViTV2ConvLayer(nn.Module): + def __init__( + self, + config: MobileViTV2Config, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + groups: int = 1, + bias: bool = False, + dilation: int = 1, + use_normalization: bool = True, + use_activation: Union[bool, str] = True, + ) -> None: + super().__init__() + padding = int((kernel_size - 1) / 2) * dilation + + if in_channels % groups != 0: + raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups.") + if out_channels % groups != 0: + raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups.") + + self.convolution = nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode="zeros", + ) + + if use_normalization: + self.normalization = nn.BatchNorm2d( + num_features=out_channels, + eps=1e-5, + momentum=0.1, + affine=True, + track_running_stats=True, + ) + else: + self.normalization = None + + if use_activation: + if isinstance(use_activation, str): + self.activation = ACT2FN[use_activation] + elif isinstance(config.hidden_act, str): + self.activation = ACT2FN[config.hidden_act] + else: + self.activation = config.hidden_act + else: + self.activation = None + + def forward(self, features: torch.Tensor) -> torch.Tensor: + features = self.convolution(features) + if self.normalization is not None: + features = self.normalization(features) + if self.activation is not None: + features = self.activation(features) + return features + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTInvertedResidual with MobileViT->MobileViTV2 +class MobileViTV2InvertedResidual(nn.Module): + """ + Inverted residual block (MobileNetv2): https://arxiv.org/abs/1801.04381 + """ + + def __init__( + self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1 + ) -> None: + super().__init__() + expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8) + + if stride not in [1, 2]: + raise ValueError(f"Invalid stride {stride}.") + + self.use_residual = (stride == 1) and (in_channels == out_channels) + + self.expand_1x1 = MobileViTV2ConvLayer( + config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1 + ) + + self.conv_3x3 = MobileViTV2ConvLayer( + config, + in_channels=expanded_channels, + out_channels=expanded_channels, + kernel_size=3, + stride=stride, + groups=expanded_channels, + dilation=dilation, + ) + + self.reduce_1x1 = MobileViTV2ConvLayer( + config, + in_channels=expanded_channels, + out_channels=out_channels, + kernel_size=1, + use_activation=False, + ) + + def forward(self, features: torch.Tensor) -> torch.Tensor: + residual = features + + features = self.expand_1x1(features) + features = self.conv_3x3(features) + features = self.reduce_1x1(features) + + return residual + features if self.use_residual else features + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer with MobileViT->MobileViTV2 +class MobileViTV2MobileNetLayer(nn.Module): + def 
__init__( + self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int = 1, num_stages: int = 1 + ) -> None: + super().__init__() + + self.layer = nn.ModuleList() + for i in range(num_stages): + layer = MobileViTV2InvertedResidual( + config, + in_channels=in_channels, + out_channels=out_channels, + stride=stride if i == 0 else 1, + ) + self.layer.append(layer) + in_channels = out_channels + + def forward(self, features: torch.Tensor) -> torch.Tensor: + for layer_module in self.layer: + features = layer_module(features) + return features + + +class MobileViTV2LinearSelfAttention(nn.Module): + """ + This layer applies a self-attention with linear complexity, as described in MobileViTV2 paper: + https://arxiv.org/abs/2206.02680 + + Args: + config (`MobileVitv2Config`): + Model configuration object + embed_dim (`int`): + `input_channels` from an expected input of size :math:`(batch_size, input_channels, height, width)` + """ + + def __init__(self, config: MobileViTV2Config, embed_dim: int) -> None: + super().__init__() + + self.qkv_proj = MobileViTV2ConvLayer( + config=config, + in_channels=embed_dim, + out_channels=1 + (2 * embed_dim), + bias=True, + kernel_size=1, + use_normalization=False, + use_activation=False, + ) + + self.attn_dropout = nn.Dropout(p=config.attn_dropout) + self.out_proj = MobileViTV2ConvLayer( + config=config, + in_channels=embed_dim, + out_channels=embed_dim, + bias=True, + kernel_size=1, + use_normalization=False, + use_activation=False, + ) + self.embed_dim = embed_dim + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + # (batch_size, embed_dim, num_pixels_in_patch, num_patches) --> (batch_size, 1+2*embed_dim, num_pixels_in_patch, num_patches) + qkv = self.qkv_proj(hidden_states) + + # Project hidden_states into query, key and value + # Query --> [batch_size, 1, num_pixels_in_patch, num_patches] + # value, key --> [batch_size, embed_dim, num_pixels_in_patch, num_patches] + query, key, value = torch.split(qkv, split_size_or_sections=[1, self.embed_dim, self.embed_dim], dim=1) + + # apply softmax along num_patches dimension + context_scores = torch.nn.functional.softmax(query, dim=-1) + context_scores = self.attn_dropout(context_scores) + + # Compute context vector + # [batch_size, embed_dim, num_pixels_in_patch, num_patches] x [batch_size, 1, num_pixels_in_patch, num_patches] -> [batch_size, embed_dim, num_pixels_in_patch, num_patches] + context_vector = key * context_scores + # [batch_size, embed_dim, num_pixels_in_patch, num_patches] --> [batch_size, embed_dim, num_pixels_in_patch, 1] + context_vector = torch.sum(context_vector, dim=-1, keepdim=True) + + # combine context vector with values + # [batch_size, embed_dim, num_pixels_in_patch, num_patches] * [batch_size, embed_dim, num_pixels_in_patch, 1] --> [batch_size, embed_dim, num_pixels_in_patch, num_patches] + out = torch.nn.functional.relu(value) * context_vector.expand_as(value) + out = self.out_proj(out) + return out + + +class MobileViTV2FFN(nn.Module): + def __init__( + self, + config: MobileViTV2Config, + embed_dim: int, + ffn_latent_dim: int, + ffn_dropout: float = 0.0, + ) -> None: + super().__init__() + self.conv1 = MobileViTV2ConvLayer( + config=config, + in_channels=embed_dim, + out_channels=ffn_latent_dim, + kernel_size=1, + stride=1, + bias=True, + use_normalization=False, + use_activation=True, + ) + self.dropout1 = nn.Dropout(ffn_dropout) + + self.conv2 = MobileViTV2ConvLayer( + config=config, + in_channels=ffn_latent_dim, + out_channels=embed_dim, + 
kernel_size=1, + stride=1, + bias=True, + use_normalization=False, + use_activation=False, + ) + self.dropout2 = nn.Dropout(ffn_dropout) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.conv1(hidden_states) + hidden_states = self.dropout1(hidden_states) + hidden_states = self.conv2(hidden_states) + hidden_states = self.dropout2(hidden_states) + return hidden_states + + +class MobileViTV2TransformerLayer(nn.Module): + def __init__( + self, + config: MobileViTV2Config, + embed_dim: int, + ffn_latent_dim: int, + dropout: float = 0.0, + ) -> None: + super().__init__() + self.layernorm_before = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps) + self.attention = MobileViTV2LinearSelfAttention(config, embed_dim) + self.dropout1 = nn.Dropout(p=dropout) + self.layernorm_after = nn.GroupNorm(num_groups=1, num_channels=embed_dim, eps=config.layer_norm_eps) + self.ffn = MobileViTV2FFN(config, embed_dim, ffn_latent_dim, config.ffn_dropout) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + layernorm_1_out = self.layernorm_before(hidden_states) + attention_output = self.attention(layernorm_1_out) + hidden_states = attention_output + hidden_states + + layer_output = self.layernorm_after(hidden_states) + layer_output = self.ffn(layer_output) + + layer_output = layer_output + hidden_states + return layer_output + + +class MobileViTV2Transformer(nn.Module): + def __init__(self, config: MobileViTV2Config, n_layers: int, d_model: int) -> None: + super().__init__() + + ffn_multiplier = config.ffn_multiplier + + ffn_dims = [ffn_multiplier * d_model] * n_layers + + # ensure that dims are multiple of 16 + ffn_dims = [int((d // 16) * 16) for d in ffn_dims] + + self.layer = nn.ModuleList() + for block_idx in range(n_layers): + transformer_layer = MobileViTV2TransformerLayer( + config, embed_dim=d_model, ffn_latent_dim=ffn_dims[block_idx] + ) + self.layer.append(transformer_layer) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + for layer_module in self.layer: + hidden_states = layer_module(hidden_states) + return hidden_states + + +class MobileViTV2Layer(nn.Module): + """ + MobileViTV2 layer: https://arxiv.org/abs/2206.02680 + """ + + def __init__( + self, + config: MobileViTV2Config, + in_channels: int, + out_channels: int, + attn_unit_dim: int, + n_attn_blocks: int = 2, + dilation: int = 1, + stride: int = 2, + ) -> None: + super().__init__() + self.patch_width = config.patch_size + self.patch_height = config.patch_size + + cnn_out_dim = attn_unit_dim + + if stride == 2: + self.downsampling_layer = MobileViTV2InvertedResidual( + config, + in_channels=in_channels, + out_channels=out_channels, + stride=stride if dilation == 1 else 1, + dilation=dilation // 2 if dilation > 1 else 1, + ) + in_channels = out_channels + else: + self.downsampling_layer = None + + # Local representations + self.conv_kxk = MobileViTV2ConvLayer( + config, + in_channels=in_channels, + out_channels=in_channels, + kernel_size=config.conv_kernel_size, + groups=in_channels, + ) + self.conv_1x1 = MobileViTV2ConvLayer( + config, + in_channels=in_channels, + out_channels=cnn_out_dim, + kernel_size=1, + use_normalization=False, + use_activation=False, + ) + + # Global representations + self.transformer = MobileViTV2Transformer(config, d_model=attn_unit_dim, n_layers=n_attn_blocks) + + # self.layernorm = MobileViTV2LayerNorm2D(attn_unit_dim, eps=config.layer_norm_eps) + self.layernorm = nn.GroupNorm(num_groups=1, num_channels=attn_unit_dim, 
eps=config.layer_norm_eps) + + # Fusion + self.conv_projection = MobileViTV2ConvLayer( + config, + in_channels=cnn_out_dim, + out_channels=in_channels, + kernel_size=1, + use_normalization=True, + use_activation=False, + ) + + def unfolding(self, feature_map: torch.Tensor) -> Tuple[torch.Tensor, Tuple[int, int]]: + batch_size, in_channels, img_height, img_width = feature_map.shape + patches = nn.functional.unfold( + feature_map, + kernel_size=(self.patch_height, self.patch_width), + stride=(self.patch_height, self.patch_width), + ) + patches = patches.reshape(batch_size, in_channels, self.patch_height * self.patch_width, -1) + + return patches, (img_height, img_width) + + def folding(self, patches: torch.Tensor, output_size: Tuple[int, int]) -> torch.Tensor: + batch_size, in_dim, patch_size, n_patches = patches.shape + patches = patches.reshape(batch_size, in_dim * patch_size, n_patches) + + feature_map = nn.functional.fold( + patches, + output_size=output_size, + kernel_size=(self.patch_height, self.patch_width), + stride=(self.patch_height, self.patch_width), + ) + + return feature_map + + def forward(self, features: torch.Tensor) -> torch.Tensor: + # reduce spatial dimensions if needed + if self.downsampling_layer: + features = self.downsampling_layer(features) + + # local representation + features = self.conv_kxk(features) + features = self.conv_1x1(features) + + # convert feature map to patches + patches, output_size = self.unfolding(features) + + # learn global representations + patches = self.transformer(patches) + patches = self.layernorm(patches) + + # convert patches back to feature maps + # [batch_size, patch_height, patch_width, input_dim] --> [batch_size, input_dim, patch_height, patch_width] + features = self.folding(patches, output_size) + + features = self.conv_projection(features) + return features + + +class MobileViTV2Encoder(nn.Module): + def __init__(self, config: MobileViTV2Config) -> None: + super().__init__() + self.config = config + + self.layer = nn.ModuleList() + self.gradient_checkpointing = False + + # segmentation architectures like DeepLab and PSPNet modify the strides + # of the classification backbones + dilate_layer_4 = dilate_layer_5 = False + if config.output_stride == 8: + dilate_layer_4 = True + dilate_layer_5 = True + elif config.output_stride == 16: + dilate_layer_5 = True + + dilation = 1 + + layer_0_dim = make_divisible( + clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16 + ) + + layer_1_dim = make_divisible(64 * config.width_multiplier, divisor=16) + layer_2_dim = make_divisible(128 * config.width_multiplier, divisor=8) + layer_3_dim = make_divisible(256 * config.width_multiplier, divisor=8) + layer_4_dim = make_divisible(384 * config.width_multiplier, divisor=8) + layer_5_dim = make_divisible(512 * config.width_multiplier, divisor=8) + + layer_1 = MobileViTV2MobileNetLayer( + config, + in_channels=layer_0_dim, + out_channels=layer_1_dim, + stride=1, + num_stages=1, + ) + self.layer.append(layer_1) + + layer_2 = MobileViTV2MobileNetLayer( + config, + in_channels=layer_1_dim, + out_channels=layer_2_dim, + stride=2, + num_stages=2, + ) + self.layer.append(layer_2) + + layer_3 = MobileViTV2Layer( + config, + in_channels=layer_2_dim, + out_channels=layer_3_dim, + attn_unit_dim=make_divisible(config.base_attn_unit_dims[0] * config.width_multiplier, divisor=8), + n_attn_blocks=config.n_attn_blocks[0], + ) + self.layer.append(layer_3) + + if dilate_layer_4: + dilation *= 2 + + layer_4 = MobileViTV2Layer( + config, 
+ in_channels=layer_3_dim, + out_channels=layer_4_dim, + attn_unit_dim=make_divisible(config.base_attn_unit_dims[1] * config.width_multiplier, divisor=8), + n_attn_blocks=config.n_attn_blocks[1], + dilation=dilation, + ) + self.layer.append(layer_4) + + if dilate_layer_5: + dilation *= 2 + + layer_5 = MobileViTV2Layer( + config, + in_channels=layer_4_dim, + out_channels=layer_5_dim, + attn_unit_dim=make_divisible(config.base_attn_unit_dims[2] * config.width_multiplier, divisor=8), + n_attn_blocks=config.n_attn_blocks[2], + dilation=dilation, + ) + self.layer.append(layer_5) + + def forward( + self, + hidden_states: torch.Tensor, + output_hidden_states: bool = False, + return_dict: bool = True, + ) -> Union[tuple, BaseModelOutputWithNoAttention]: + all_hidden_states = () if output_hidden_states else None + + for i, layer_module in enumerate(self.layer): + if self.gradient_checkpointing and self.training: + hidden_states = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + ) + else: + hidden_states = layer_module(hidden_states) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states] if v is not None) + + return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states) + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTPreTrainedModel with MobileViT->MobileViTV2,mobilevit->mobilevitv2 +class MobileViTV2PreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = MobileViTV2Config + base_model_prefix = "mobilevitv2" + main_input_name = "pixel_values" + supports_gradient_checkpointing = True + + def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None: + """Initialize the weights""" + if isinstance(module, (nn.Linear, nn.Conv2d)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +MOBILEVITV2_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it + as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. + + Parameters: + config ([`MobileViTV2Config`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +MOBILEVITV2_INPUTS_DOCSTRING = r""" + Args: + pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): + Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See + [`MobileViTImageProcessor.__call__`] for details. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare MobileViTV2 model outputting raw hidden-states without any specific head on top.", + MOBILEVITV2_START_DOCSTRING, +) +class MobileViTV2Model(MobileViTV2PreTrainedModel): + def __init__(self, config: MobileViTV2Config, expand_output: bool = True): + super().__init__(config) + self.config = config + self.expand_output = expand_output + + layer_0_dim = make_divisible( + clip(value=32 * config.width_multiplier, min_val=16, max_val=64), divisor=8, min_value=16 + ) + + self.conv_stem = MobileViTV2ConvLayer( + config, + in_channels=config.num_channels, + out_channels=layer_0_dim, + kernel_size=3, + stride=2, + use_normalization=True, + use_activation=True, + ) + self.encoder = MobileViTV2Encoder(config) + + # Initialize weights and apply final processing + self.post_init() + + def _prune_heads(self, heads_to_prune): + """Prunes heads of the model. + heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel + """ + for layer_index, heads in heads_to_prune.items(): + mobilevitv2_layer = self.encoder.layer[layer_index] + if isinstance(mobilevitv2_layer, MobileViTV2Layer): + for transformer_layer in mobilevitv2_layer.transformer.layer: + transformer_layer.attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPoolingAndNoAttention, + config_class=_CONFIG_FOR_DOC, + modality="vision", + expected_output=_EXPECTED_OUTPUT_SHAPE, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]: + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if pixel_values is None: + raise ValueError("You have to specify pixel_values") + + embedding_output = self.conv_stem(pixel_values) + + encoder_outputs = self.encoder( + embedding_output, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if self.expand_output: + last_hidden_state = encoder_outputs[0] + + # global average pooling: (batch_size, channels, height, width) -> (batch_size, channels) + pooled_output = torch.mean(last_hidden_state, dim=[-2, -1], keepdim=False) + else: + last_hidden_state = encoder_outputs[0] + pooled_output = None + + if not return_dict: + output = (last_hidden_state, pooled_output) if pooled_output is not None else (last_hidden_state,) + return output + encoder_outputs[1:] + + return BaseModelOutputWithPoolingAndNoAttention( + last_hidden_state=last_hidden_state, + pooler_output=pooled_output, + hidden_states=encoder_outputs.hidden_states, + ) + + +@add_start_docstrings( + """ + MobileViTV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for + ImageNet. 
+ """, + MOBILEVITV2_START_DOCSTRING, +) +class MobileViTV2ForImageClassification(MobileViTV2PreTrainedModel): + def __init__(self, config: MobileViTV2Config) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.mobilevitv2 = MobileViTV2Model(config) + + out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension + # Classifier head + self.classifier = ( + nn.Linear(in_features=out_channels, out_features=config.num_labels) + if config.num_labels > 0 + else nn.Identity() + ) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_IMAGE_CLASS_CHECKPOINT, + output_type=ImageClassifierOutputWithNoAttention, + config_class=_CONFIG_FOR_DOC, + expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, + ) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + labels: Optional[torch.Tensor] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, ImageClassifierOutputWithNoAttention]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the image classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mobilevitv2(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict) + + pooled_output = outputs.pooler_output if return_dict else outputs[1] + + logits = self.classifier(pooled_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return ImageClassifierOutputWithNoAttention( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + ) + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTASPPPooling with MobileViT->MobileViTV2 +class MobileViTV2ASPPPooling(nn.Module): + def __init__(self, config: MobileViTV2Config, in_channels: int, out_channels: int) -> None: + super().__init__() + + self.global_pool = nn.AdaptiveAvgPool2d(output_size=1) + + self.conv_1x1 = MobileViTV2ConvLayer( + config, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + stride=1, + use_normalization=True, + use_activation="relu", + ) + + def forward(self, features: torch.Tensor) -> torch.Tensor: + spatial_size = 
features.shape[-2:] + features = self.global_pool(features) + features = self.conv_1x1(features) + features = nn.functional.interpolate(features, size=spatial_size, mode="bilinear", align_corners=False) + return features + + +class MobileViTV2ASPP(nn.Module): + """ + ASPP module defined in DeepLab papers: https://arxiv.org/abs/1606.00915, https://arxiv.org/abs/1706.05587 + """ + + def __init__(self, config: MobileViTV2Config) -> None: + super().__init__() + + encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension + in_channels = encoder_out_channels + out_channels = config.aspp_out_channels + + if len(config.atrous_rates) != 3: + raise ValueError("Expected 3 values for atrous_rates") + + self.convs = nn.ModuleList() + + in_projection = MobileViTV2ConvLayer( + config, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=1, + use_activation="relu", + ) + self.convs.append(in_projection) + + self.convs.extend( + [ + MobileViTV2ConvLayer( + config, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=3, + dilation=rate, + use_activation="relu", + ) + for rate in config.atrous_rates + ] + ) + + pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels) + self.convs.append(pool_layer) + + self.project = MobileViTV2ConvLayer( + config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu" + ) + + self.dropout = nn.Dropout(p=config.aspp_dropout_prob) + + def forward(self, features: torch.Tensor) -> torch.Tensor: + pyramid = [] + for conv in self.convs: + pyramid.append(conv(features)) + pyramid = torch.cat(pyramid, dim=1) + + pooled_features = self.project(pyramid) + pooled_features = self.dropout(pooled_features) + return pooled_features + + +# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTDeepLabV3 with MobileViT->MobileViTV2 +class MobileViTV2DeepLabV3(nn.Module): + """ + DeepLabv3 architecture: https://arxiv.org/abs/1706.05587 + """ + + def __init__(self, config: MobileViTV2Config) -> None: + super().__init__() + self.aspp = MobileViTV2ASPP(config) + + self.dropout = nn.Dropout2d(config.classifier_dropout_prob) + + self.classifier = MobileViTV2ConvLayer( + config, + in_channels=config.aspp_out_channels, + out_channels=config.num_labels, + kernel_size=1, + use_normalization=False, + use_activation=False, + bias=True, + ) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + features = self.aspp(hidden_states[-1]) + features = self.dropout(features) + features = self.classifier(features) + return features + + +@add_start_docstrings( + """ + MobileViTV2 model with a semantic segmentation head on top, e.g. for Pascal VOC. 
+ """, + MOBILEVITV2_START_DOCSTRING, +) +class MobileViTV2ForSemanticSegmentation(MobileViTV2PreTrainedModel): + def __init__(self, config: MobileViTV2Config) -> None: + super().__init__(config) + + self.num_labels = config.num_labels + self.mobilevitv2 = MobileViTV2Model(config, expand_output=False) + self.segmentation_head = MobileViTV2DeepLabV3(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(MOBILEVITV2_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + pixel_values: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[tuple, SemanticSegmenterOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): + Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels > 1`, a classification loss is computed (Cross-Entropy). + + Returns: + + Examples: + + ```python + >>> import requests + >>> import torch + >>> from PIL import Image + >>> from transformers import AutoImageProcessor, MobileViTV2ForSemanticSegmentation + + >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" + >>> image = Image.open(requests.get(url, stream=True).raw) + + >>> image_processor = AutoImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256") + >>> model = MobileViTV2ForSemanticSegmentation.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256") + + >>> inputs = image_processor(images=image, return_tensors="pt") + + >>> with torch.no_grad(): + ... outputs = model(**inputs) + + >>> # logits are of shape (batch_size, num_labels, height, width) + >>> logits = outputs.logits + ```""" + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.mobilevitv2( + pixel_values, + output_hidden_states=True, # we need the intermediate hidden states + return_dict=return_dict, + ) + + encoder_hidden_states = outputs.hidden_states if return_dict else outputs[1] + + logits = self.segmentation_head(encoder_hidden_states) + + loss = None + if labels is not None: + if self.config.num_labels == 1: + raise ValueError("The number of labels should be greater than one") + else: + # upsample logits to the images' original size + upsampled_logits = nn.functional.interpolate( + logits, size=labels.shape[-2:], mode="bilinear", align_corners=False + ) + loss_fct = CrossEntropyLoss(ignore_index=self.config.semantic_loss_ignore_index) + loss = loss_fct(upsampled_logits, labels) + + if not return_dict: + if output_hidden_states: + output = (logits,) + outputs[1:] + else: + output = (logits,) + outputs[2:] + return ((loss,) + output) if loss is not None else output + + return SemanticSegmenterOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states if output_hidden_states else None, + attentions=None, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff14e5b987a789c86f3ca37e11d79afe540a177e --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__init__.py @@ -0,0 +1,78 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available + + +_import_structure = { + "configuration_xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"], +} + +try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_xlm_prophetnet"] = ["XLMProphetNetTokenizer"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_xlm_prophetnet"] = [ + "XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", + "XLMProphetNetDecoder", + "XLMProphetNetEncoder", + "XLMProphetNetForCausalLM", + "XLMProphetNetForConditionalGeneration", + "XLMProphetNetModel", + "XLMProphetNetPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig + + try: + if not is_sentencepiece_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_xlm_prophetnet import XLMProphetNetTokenizer + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_xlm_prophetnet import ( + XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, + XLMProphetNetDecoder, + XLMProphetNetEncoder, + XLMProphetNetForCausalLM, + XLMProphetNetForConditionalGeneration, + XLMProphetNetModel, + XLMProphetNetPreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7609bca44767b1137619720203c38d9c6e959ea0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b4c29988c65c23461b8706118d150eebc9fa0e2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/configuration_xlm_prophetnet.cpython-310.pyc differ diff 
--git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8de1fbd1a7ab315ef1966a3ae86db8ad35101a04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/modeling_xlm_prophetnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ccdf23106936228b13d321d0e05ea9df155665b Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/__pycache__/tokenization_xlm_prophetnet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f1a903c227bf596c9bebaa610251b9ef5c68aa44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/configuration_xlm_prophetnet.py @@ -0,0 +1,182 @@ +# coding=utf-8 +# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" XLM-ProphetNet model configuration""" + + +from typing import Callable, Optional, Union + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class XLMProphetNetConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`XLMProphetNetModel`]. It is used to instantiate a + XLMProphetNet model according to the specified arguments, defining the model architecture. Instantiating a + configuration with the defaults will yield a similar configuration to that of the XLMProphetNet + [microsoft/xprophetnet-large-wiki100-cased](https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + activation_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for activations inside the fully connected layer. + activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. 
If string, `"gelu"`, + `"relu"`, `"silu"` and `"gelu_new"` are supported. + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the XLMProphetNet model. Defines the number of different tokens that can be represented by + the `input_ids` passed when calling [`XLMProphetNetModel`]. + hidden_size (`int`, *optional*, defaults to 1024): + Dimensionality of the layers and the pooler layer. + encoder_ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the "intermediate" (often named feed-forward) layer in the encoder. + num_encoder_layers (`int`, *optional*, defaults to 12): + Number of encoder layers. + num_encoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + decoder_ffn_dim (`int`, *optional*, defaults to 4096): + Dimensionality of the `intermediate` (often named feed-forward) layer in the decoder. + num_decoder_layers (`int`, *optional*, defaults to 12): + Number of decoder layers. + num_decoder_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer decoder. + attention_dropout (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + dropout (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + init_std (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + add_cross_attention (`bool`, *optional*, defaults to `True`): + Whether cross-attention layers should be added to the model. + is_encoder_decoder (`bool`, *optional*, defaults to `True`): + Whether this is an encoder/decoder model. + pad_token_id (`int`, *optional*, defaults to 0): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 1): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 2): + End of stream token id. + ngram (`int`, *optional*, defaults to 2): + Number of future tokens to predict. Set to 1 to be the same as a traditional language model that predicts only the next + token. + num_buckets (`int`, *optional*, defaults to 32): + The number of buckets to use for each attention layer. This is for relative position calculation. See the + [T5 paper](https://arxiv.org/abs/1910.10683) for more details. + relative_max_distance (`int`, *optional*, defaults to 128): + Relative distances greater than this number will all be put into the last bucket. This is for relative + position calculation. See the [T5 paper](https://arxiv.org/abs/1910.10683) for more details. + disable_ngram_loss (`bool`, *optional*, defaults to `False`): + Whether to train on only the next first token's prediction, disabling the additional n-gram losses. + eps (`float`, *optional*, defaults to 0.0): + Controls the `epsilon` parameter value for label smoothing in the loss calculation. If set to 0, no label + smoothing is performed. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models).
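As a hedged illustration of the arguments documented above (editorial, not part of the vendored file; the values shown are just the documented defaults spelled out explicitly), the configuration can be instantiated directly:

import torch  # not required here, but mirrors the environment this diff vendors
from transformers import XLMProphetNetConfig

config = XLMProphetNetConfig(
    vocab_size=30522,
    hidden_size=1024,
    num_encoder_layers=12,
    num_decoder_layers=12,
    ngram=2,         # number of future tokens predicted per position
    num_buckets=32,  # T5-style relative-position buckets
)
print(config.num_hidden_layers)  # 24 = num_encoder_layers + num_decoder_layers (see the property defined below)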
+ """ + + model_type = "xlm-prophetnet" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "num_attention_heads": "num_encoder_attention_heads", + } + + def __init__( + self, + activation_dropout: Optional[float] = 0.1, + activation_function: Optional[Union[str, Callable]] = "gelu", + vocab_size: Optional[int] = 30522, + hidden_size: Optional[int] = 1024, + encoder_ffn_dim: Optional[int] = 4096, + num_encoder_layers: Optional[int] = 12, + num_encoder_attention_heads: Optional[int] = 16, + decoder_ffn_dim: Optional[int] = 4096, + num_decoder_layers: Optional[int] = 12, + num_decoder_attention_heads: Optional[int] = 16, + attention_dropout: Optional[float] = 0.1, + dropout: Optional[float] = 0.1, + max_position_embeddings: Optional[int] = 512, + init_std: Optional[float] = 0.02, + is_encoder_decoder: Optional[bool] = True, + add_cross_attention: Optional[bool] = True, + decoder_start_token_id: Optional[int] = 0, + ngram: Optional[int] = 2, + num_buckets: Optional[int] = 32, + relative_max_distance: Optional[int] = 128, + disable_ngram_loss: Optional[bool] = False, + eps: Optional[float] = 0.0, + use_cache: Optional[bool] = True, + pad_token_id: Optional[int] = 0, + bos_token_id: Optional[int] = 1, + eos_token_id: Optional[int] = 2, + **kwargs, + ): + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.encoder_ffn_dim = encoder_ffn_dim + self.num_encoder_layers = num_encoder_layers + self.num_encoder_attention_heads = num_encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.num_decoder_layers = num_decoder_layers + self.num_decoder_attention_heads = num_decoder_attention_heads + self.max_position_embeddings = max_position_embeddings + self.init_std = init_std # Normal(0, this parameter) + self.activation_function = activation_function + + # parameters for xlmprophetnet + self.ngram = ngram + self.num_buckets = num_buckets + self.relative_max_distance = relative_max_distance + self.disable_ngram_loss = disable_ngram_loss + self.eps = eps + + # 3 Types of Dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.dropout = dropout + + self.use_cache = use_cache + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + is_encoder_decoder=is_encoder_decoder, + add_cross_attention=add_cross_attention, + decoder_start_token_id=decoder_start_token_id, + **kwargs, + ) + + @property + def num_hidden_layers(self) -> int: + return self.num_encoder_layers + self.num_decoder_layers + + @num_hidden_layers.setter + def num_hidden_layers(self, value): + raise NotImplementedError( + "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and" + " `num_decoder_layers`." + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py new file mode 100644 index 0000000000000000000000000000000000000000..53b8a1fc20cbb595f8e038e8464acde34132d771 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/modeling_xlm_prophetnet.py @@ -0,0 +1,2366 @@ +# coding=utf-8 +# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch XLM-ProphetNet model.""" + + +import copy +import math +import warnings +from dataclasses import dataclass +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import Tensor, nn +from torch.nn import LayerNorm + +from ...activations import ACT2FN +from ...modeling_outputs import BaseModelOutput +from ...modeling_utils import PreTrainedModel +from ...utils import ( + ModelOutput, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, + replace_return_docstrings, +) +from .configuration_xlm_prophetnet import XLMProphetNetConfig + + +logger = logging.get_logger(__name__) + + +_CONFIG_FOR_DOC = "XLMProphetNetConfig" + + +from ..deprecated._archive_maps import XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_START_DOCSTRING with ProphetNetConfig->XLMProphetNetConfig +XLM_PROPHETNET_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + Original ProphetNet code can be found [here](https://github.com/microsoft/ProphetNet). Checkpoints were converted + from original Fairseq checkpoints. For more information on the checkpoint conversion, please take a look at the + file `convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py`. + + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and + behavior. + + Parameters: + config ([`XLMProphetNetConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_INPUTS_DOCSTRING with ProphetNet->XLMProphetNet +XLM_PROPHETNET_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Indices of decoder input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are decoder input IDs?](../glossary#decoder-input-ids) + + XLMProphetNet uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If + `past_key_values` is used, optionally only the last `decoder_input_ids` have to be input (see + `past_key_values`). + + decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*): + Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also + be used by default. + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + decoder_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): + Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) + `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of + hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
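To make the argument list above concrete, here is a hedged, self-contained sketch (editorial, not part of the diff) that runs a forward pass on a tiny randomly initialized model, so no pretrained checkpoint or download is needed; all sizes and token ids are arbitrary:

import torch
from transformers import XLMProphetNetConfig, XLMProphetNetForConditionalGeneration

# Tiny configuration so the example runs quickly on CPU; real checkpoints use the documented defaults.
config = XLMProphetNetConfig(
    vocab_size=128, hidden_size=16, encoder_ffn_dim=32, decoder_ffn_dim=32,
    num_encoder_layers=1, num_decoder_layers=1,
    num_encoder_attention_heads=2, num_decoder_attention_heads=2,
)
model = XLMProphetNetForConditionalGeneration(config).eval()

input_ids = torch.tensor([[5, 6, 7, 1]])       # (batch_size, sequence_length)
attention_mask = torch.tensor([[1, 1, 1, 0]])  # 1 = not masked, 0 = masked (padding)
decoder_input_ids = torch.tensor([[2, 8, 9]])  # (batch_size, target_sequence_length)

with torch.no_grad():
    outputs = model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        decoder_input_ids=decoder_input_ids,
        return_dict=True,
    )
print(outputs.logits.shape)  # torch.Size([1, 3, 128]) -> (batch_size, target_sequence_length, vocab_size)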
+""" + + +# Copied from src.transformers.models.prophetnet.modeling_prophetnet.PROPHETNET_STANDALONE_INPUTS_DOCSTRING with ProphetNet->XLMProphetNet +XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.softmax +def softmax(hidden_state, dim, onnx_trace=False): + if onnx_trace: + return nn.functional.softmax(hidden_state.float(), dim=dim) + else: + return nn.functional.softmax(hidden_state, dim=dim, dtype=torch.float32) + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ngram_attention_bias +def ngram_attention_bias(sequence_length, ngram, device, dtype): + """ + This function computes the bias for the predict stream + """ + left_block = ( + torch.ones((ngram, sequence_length, sequence_length), device=device, dtype=dtype) * torch.finfo(dtype).min + ) + right_block = left_block.detach().clone() + # create bias + for stream_idx in range(ngram): + right_block[stream_idx].fill_diagonal_(0, wrap=False) + left_block[stream_idx].triu_(-stream_idx + 1) + + left_block[:, :, 0] = 0 + return torch.cat([left_block, right_block], dim=2) + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.compute_relative_buckets +def compute_relative_buckets(num_buckets, max_distance, relative_positions, is_bidirectional=False): + """ + This function computes individual parts of the relative position buckets. For more detail, see paper. 
+ """ + inv_relative_positions = -relative_positions + rel_positions_bucket = 0 + + if is_bidirectional: + num_buckets = num_buckets // 2 + rel_positions_bucket = ( + rel_positions_bucket + + torch.lt(inv_relative_positions, torch.zeros_like(inv_relative_positions)).int() * num_buckets + ) + inv_relative_positions = torch.abs(inv_relative_positions) + else: + inv_relative_positions = torch.max(inv_relative_positions, torch.zeros_like(inv_relative_positions)) + + max_exact = num_buckets // 2 + is_small = torch.lt(inv_relative_positions, max_exact) + val_if_large = max_exact + torch.log(inv_relative_positions.float() / max_exact) / math.log( + max_distance / max_exact + ) * (num_buckets - max_exact) + val_if_large = torch.min(val_if_large, torch.ones_like(val_if_large) * (num_buckets - 1)).int() + rel_positions_bucket = rel_positions_bucket + torch.where(is_small, inv_relative_positions.int(), val_if_large) + return rel_positions_bucket + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.compute_all_stream_relative_buckets +def compute_all_stream_relative_buckets(num_buckets, max_distance, position_ids): + """ + This function computes both main and predict relative position buckets. For more detail, see paper. + """ + # main stream + main_stream_relative_positions = position_ids.unsqueeze(1).repeat(1, position_ids.size(-1), 1) + main_stream_relative_positions = main_stream_relative_positions - position_ids.unsqueeze(-1) + + # predicting stream + predicting_stream_relative_positions = torch.cat((position_ids - 1, position_ids), dim=-1).unsqueeze(1) + predicting_stream_relative_positions = predicting_stream_relative_positions.repeat(1, position_ids.size(-1), 1) + predicting_stream_relative_positions = predicting_stream_relative_positions - position_ids.unsqueeze(-1) + + # get both position buckets + main_relative_position_buckets = compute_relative_buckets( + num_buckets, max_distance, main_stream_relative_positions, is_bidirectional=False + ) + predict_relative_position_buckets = compute_relative_buckets( + num_buckets, max_distance, predicting_stream_relative_positions, is_bidirectional=False + ) + return main_relative_position_buckets, predict_relative_position_buckets + + +@dataclass +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqLMOutput with ProphetNet->XLMProphetNet all-casing +class XLMProphetNetSeq2SeqLMOutput(ModelOutput): + """ + Base class for sequence-to-sequence language models outputs. + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): + Prediction scores of the main stream language modeling head (scores for each vocabulary token before + SoftMax). + logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): + Prediction scores of the predict stream language modeling head (scores for each vocabulary token before + SoftMax). + past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, + num_attn_heads, decoder_sequence_length, embed_size_per_head)`). + + Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be + used (see `past_key_values` input) to speed up sequential decoding. 
+ decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, decoder_sequence_length, hidden_size)`. + + Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. + decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. + + Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding + outputs. + decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the + weighted average in the self-attention heads. + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + encoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to + compute the weighted average in the + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, encoder_sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + encoder_sequence_length, encoder_sequence_length)`. Attentions weights of the encoder, after the attention + softmax, used to compute the weighted average in the self-attention heads. 
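A hedged sketch (editorial) of how the fields documented above get populated during training; it uses a tiny randomly initialized model so it runs offline, and every value is illustrative:

import torch
from transformers import XLMProphetNetConfig, XLMProphetNetForConditionalGeneration

config = XLMProphetNetConfig(
    vocab_size=128, hidden_size=16, encoder_ffn_dim=32, decoder_ffn_dim=32,
    num_encoder_layers=1, num_decoder_layers=1,
    num_encoder_attention_heads=2, num_decoder_attention_heads=2,
)
model = XLMProphetNetForConditionalGeneration(config)

input_ids = torch.tensor([[5, 6, 7]])
labels = torch.tensor([[9, 10, 2]])  # decoder inputs are derived from the labels via the right shift

outputs = model(input_ids=input_ids, labels=labels, output_attentions=True, return_dict=True)
print(outputs.loss)                   # scalar loss over the main and predict streams
print(outputs.logits.shape)           # torch.Size([1, 3, 128]) -> main-stream scores
print(outputs.logits_ngram is None)   # False: with the default ngram=2, predict-stream scores are returned too
print(len(outputs.cross_attentions))  # 1 -> one tensor per decoder layer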
+ """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + logits_ngram: Optional[torch.FloatTensor] = None + past_key_values: Optional[Tuple[torch.FloatTensor]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + @property + def decoder_cross_attentions(self): + warnings.warn( + "`decoder_cross_attentions` is deprecated and will be removed soon. Please use `cross_attentions`" + " instead.", + FutureWarning, + ) + return self.cross_attentions + + +@dataclass +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetSeq2SeqModelOutput with ProphetNet->XLMProphetNet all-casing +class XLMProphetNetSeq2SeqModelOutput(ModelOutput): + """ + Base class for model encoder's outputs that also contains : pre-computed hidden states that can speed up sequential + decoding. + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): + Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size,ngram * decoder_sequence_length, config.vocab_size)`, *optional*): + Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. + past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, + num_attn_heads, decoder_sequence_length, embed_size_per_head)`). + + Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be + used (see `past_key_values` input) to speed up sequential decoding. + decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, decoder_sequence_length, hidden_size)`. + + Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. + decoder_ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. + + Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding + outputs. 
+ decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + decoder_ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the + weighted average in the + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + encoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to + compute the weighted average in the + encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder of the model. + encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, encoder_sequence_length, hidden_size)`. + + Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. + encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + encoder_sequence_length, encoder_sequence_length)`. + + Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + """ + + last_hidden_state: torch.FloatTensor + last_hidden_state_ngram: Optional[torch.FloatTensor] = None + past_key_values: Optional[Tuple[torch.FloatTensor]] = None + decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_ngram_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + decoder_ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + encoder_last_hidden_state: Optional[torch.FloatTensor] = None + encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None + encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None + + @property + def decoder_cross_attentions(self): + warnings.warn( + "`decoder_cross_attentions` is deprecated and will be removed soon. 
Please use `cross_attentions`" + " instead.", + FutureWarning, + ) + return self.cross_attentions + + +@dataclass +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderModelOutput with ProphetNet->XLMProphetNet all-casing +class XLMProphetNetDecoderModelOutput(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + last_hidden_state (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, hidden_size)`): + Sequence of main stream hidden-states at the output of the last layer of the decoder of the model. + + If `past_key_values` is used only the last hidden-state of the sequences of shape `(batch_size, 1, + hidden_size)` is output. + last_hidden_state_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): + Sequence of predict stream hidden-states at the output of the last layer of the decoder of the model. + past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, + num_attn_heads, decoder_sequence_length, embed_size_per_head)`). + + Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be + used (see `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, decoder_sequence_length, hidden_size)`. + + Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. + ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. + + Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding + outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. + ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the + weighted average in the + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + encoder_sequence_length, decoder_sequence_length)`. 
+ + Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to + compute the weighted average in the + """ + + last_hidden_state: torch.FloatTensor + last_hidden_state_ngram: Optional[torch.FloatTensor] = None + past_key_values: Optional[Tuple[torch.FloatTensor]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +@dataclass +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLMOutput with ProphetNet->XLMProphetNet all-casing +class XLMProphetNetDecoderLMOutput(ModelOutput): + """ + Base class for model's outputs that may also contain a past key/values (to speed up sequential decoding). + + Args: + loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): + Language modeling loss. + logits (`torch.FloatTensor` of shape `(batch_size, decoder_sequence_length, config.vocab_size)`): + Prediction scores of the main stream language modeling head (scores for each vocabulary token before + SoftMax). + logits_ngram (`torch.FloatTensor` of shape `(batch_size, ngram * decoder_sequence_length, config.vocab_size)`): + Prediction scores of the predict stream language modeling head (scores for each vocabulary token before + SoftMax). + past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): + List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, + num_attn_heads, decoder_sequence_length, embed_size_per_head)`). + + Contains pre-computed hidden-states (key and values in the attention blocks) of the decoder that can be + used (see `past_key_values` input) to speed up sequential decoding. + hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, decoder_sequence_length, hidden_size)`. + + Hidden-states of main stream of the decoder at the output of each layer plus the initial embedding outputs. + ngram_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): + Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of + shape `(batch_size, ngram * decoder_sequence_length, hidden_size)`. + + Hidden-states of the predict stream of the decoder at the output of each layer plus the initial embedding + outputs. + attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the + self-attention heads. 
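The decoder stack can also be used on its own as a causal language model, which is what produces the output class documented here. A hedged sketch (editorial; tiny random configuration, arbitrary token ids):

import torch
from transformers import XLMProphetNetConfig, XLMProphetNetForCausalLM

config = XLMProphetNetConfig(
    vocab_size=128, hidden_size=16, encoder_ffn_dim=32, decoder_ffn_dim=32,
    num_encoder_layers=1, num_decoder_layers=1,
    num_encoder_attention_heads=2, num_decoder_attention_heads=2,
)
model = XLMProphetNetForCausalLM(config)  # the class reconfigures itself as a standalone decoder

input_ids = torch.tensor([[2, 5, 6, 7]])
outputs = model(input_ids=input_ids, labels=input_ids, return_dict=True)
print(outputs.loss)          # scalar language-modeling loss
print(outputs.logits.shape)  # torch.Size([1, 4, 128]) -> (batch_size, decoder_sequence_length, vocab_size)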
+ ngram_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + decoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the predict stream of the decoder, after the attention softmax, used to compute the + weighted average in the + cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): + Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_attn_heads, + encoder_sequence_length, decoder_sequence_length)`. + + Attentions weights of the cross-attention layer of the decoder, after the attention softmax, used to + compute the weighted average in the + """ + + loss: Optional[torch.FloatTensor] = None + logits: torch.FloatTensor = None + logits_ngram: Optional[torch.FloatTensor] = None + past_key_values: Optional[Tuple[torch.FloatTensor]] = None + hidden_states: Optional[Tuple[torch.FloatTensor]] = None + hidden_states_ngram: Optional[Tuple[torch.FloatTensor]] = None + attentions: Optional[Tuple[torch.FloatTensor]] = None + ngram_attentions: Optional[Tuple[torch.FloatTensor]] = None + cross_attentions: Optional[Tuple[torch.FloatTensor]] = None + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetPreTrainedModel with ProphetNet->XLMProphetNet +class XLMProphetNetPreTrainedModel(PreTrainedModel): + config_class = XLMProphetNetConfig + base_model_prefix = "prophetnet" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=self.config.init_std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.init_std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _shift_right(self, input_ids): + decoder_start_token_id = self.config.decoder_start_token_id + pad_token_id = self.config.pad_token_id + + assert decoder_start_token_id is not None, ( + "self.model.config.decoder_start_token_id has to be defined. In XLMProphetNet it is usually set to the" + " pad_token_id. See XLMProphetNet docs for more information" + ) + + # shift inputs to the right + shifted_input_ids = input_ids.new_zeros(input_ids.shape) + shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() + shifted_input_ids[..., 0] = decoder_start_token_id + + assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." + # replace possible -100 values in labels by `pad_token_id` + shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) + + assert torch.all(shifted_input_ids >= 0).item(), "Verify that `shifted_input_ids` has only positive values" + + return shifted_input_ids + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetPositionalEmbeddings with ProphetNet->XLMProphetNet +class XLMProphetNetPositionalEmbeddings(nn.Embedding): + """ + This module learns positional embeddings up to a fixed maximum size. Padding ids are ignored by either offsetting + based on padding_idx or by setting padding_idx to None and ensuring that the appropriate position ids are passed to + the forward function. 
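To make the `_shift_right` helper above concrete, here is a hedged, standalone re-statement (editorial) of the same tensor operations with made-up values:

import torch

labels = torch.tensor([[13, 7, 42, -100]])
decoder_start_token_id, pad_token_id = 0, 0  # the defaults set by the configuration in this diff

# Shift everything one position to the right, prepend the decoder start token,
# and replace ignored label positions (-100) with the padding id.
shifted = labels.new_zeros(labels.shape)
shifted[..., 1:] = labels[..., :-1].clone()
shifted[..., 0] = decoder_start_token_id
shifted.masked_fill_(shifted == -100, pad_token_id)
print(shifted)  # tensor([[ 0, 13,  7, 42]])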
+ """ + + def __init__(self, config: XLMProphetNetConfig) -> None: + self.max_length = config.max_position_embeddings + super().__init__(config.max_position_embeddings, config.hidden_size, config.pad_token_id) + + def forward(self, inputs_shape, device, attention_mask=None, past_key_values=None, position_ids=None): + assert (position_ids is None) or ( + self.padding_idx is None + ), "If position_ids is pre-computed then padding_idx should not be set." + + if position_ids is None: + if past_key_values is not None: + # position_ids is the same for every token when decoding a single step + # Without the int() cast, it doesn't work in some cases when exporting to ONNX + prev_num_input_ids = past_key_values[0][0].shape[2] + num_input_ids = inputs_shape[1] + prev_num_input_ids + position_ids = torch.ones((1, 1), dtype=torch.long, device=device) * ( + int(self.padding_idx + num_input_ids) + ) + else: + if attention_mask is None: + attention_mask = torch.ones(inputs_shape, dtype=torch.long, device=device) + + # retrieve position_ids from input_ids / attention_mask + position_ids = ( + torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask + ).long() + self.padding_idx + + # make sure position_ids are not bigger then max_length + position_ids = position_ids.clamp(0, self.max_length - 1) + + return super().forward(position_ids), position_ids + + def _forward(self, position_ids): + return super().forward(position_ids) + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetAttention with ProphetNet->XLMProphetNet +class XLMProphetNetAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__( + self, + config: XLMProphetNetConfig, + num_attn_heads: int, + ): + super().__init__() + hidden_size = config.hidden_size + + self.attention_dropout = config.attention_dropout + self.dropout = config.dropout + self.num_attn_heads = num_attn_heads + self.head_dim = hidden_size // num_attn_heads + + assert self.head_dim * num_attn_heads == hidden_size, ( + "`config.hidden_size` must be divisible by `config.num_encoder_attention_heads` and" + " `config.num_decoder_attention_heads`" + ) + + self.key_proj = nn.Linear(hidden_size, hidden_size) + self.value_proj = nn.Linear(hidden_size, hidden_size) + self.query_proj = nn.Linear(hidden_size, hidden_size) + + self.out_proj = nn.Linear(hidden_size, hidden_size) + + def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): + return tensor.view(bsz, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() + + def forward( + self, + hidden_states, + key_value_states: Optional[Tensor] = None, + attention_mask: Optional[Tensor] = None, + layer_head_mask: Optional[Tensor] = None, + past_key_value: Optional[Tuple[Tensor]] = None, + output_attentions: bool = False, + ) -> Tuple[Tensor, Optional[Tensor]]: + batch_size, tgt_len, hidden_size = hidden_states.size() + + # if key_value_states are provided this layer is used as a cross-attention layer + # for the decoder + is_cross_attention = key_value_states is not None + assert list(hidden_states.size()) == [ + batch_size, + tgt_len, + hidden_size, + ], f"Size of hidden states should be {batch_size, tgt_len, hidden_size}, but is {hidden_states.size()}" + + # previous time steps are cached - no need to recompute key and value if they are static + query_states = self.query_proj(hidden_states) / (self.head_dim**0.5) + + if is_cross_attention and past_key_value is not None: + # reuse k,v, cross_attentions + key_states = 
past_key_value[0] + value_states = past_key_value[1] + elif is_cross_attention: + # cross_attentions + key_states = self._shape(self.key_proj(key_value_states), -1, batch_size) + value_states = self._shape(self.value_proj(key_value_states), -1, batch_size) + else: + # self_attention + key_states = self._shape(self.key_proj(hidden_states), -1, batch_size) + value_states = self._shape(self.value_proj(hidden_states), -1, batch_size) + + if is_cross_attention: + # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. + # Further calls to cross_attention layer can then reuse all cross-attention + # key/value_states (first "if" case) + # if encoder bi-directional self-attention `past_key_value` is always `None` + past_key_value = (key_states, value_states) + + # project states into the correct shape + proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) + query_states = self._shape(query_states, tgt_len, batch_size).view(*proj_shape) + key_states = key_states.view(*proj_shape) + value_states = value_states.view(*proj_shape) + src_len = key_states.size(2) + attn_weights = torch.einsum("bsij,bsjk->bsik", query_states, key_states.transpose(2, 3)) + expected_shape = (batch_size, self.num_attn_heads, tgt_len, src_len) + if attn_weights.size() != expected_shape: + raise ValueError(f"Attention weights should have size {expected_shape}, but is {attn_weights.size()}") + + # This is part of a workaround to get around fork/join parallelism not supporting Optional types. + if attention_mask is not None and attention_mask.dim() == 0: + attention_mask = None + + expected_shape = (batch_size, self.num_attn_heads, 1, src_len) + if attention_mask is not None and attention_mask.size() != expected_shape: + raise ValueError(f"Attention mask should have size {expected_shape}, but is {attention_mask.size()}") + if attention_mask is not None: # don't attend to padding symbols + attn_weights = attn_weights + attention_mask + if output_attentions: + attn_weights_reshaped = attn_weights + else: + attn_weights_reshaped = None + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + if layer_head_mask is not None: + assert layer_head_mask.size() == (self.num_attn_heads,), ( + f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( + batch_size, self.num_attn_heads, tgt_len, src_len + ) + + # apply head_mask also on attn_weights_reshaped which is used for n-gram attention inside the model + attn_weights_reshaped = layer_head_mask.view(1, -1, 1, 1) * attn_weights_reshaped + + attn_probs = nn.functional.dropout( + attn_weights, + p=self.attention_dropout, + training=self.training, + ) + attn_output = torch.einsum("bsij,bsjk->bsik", attn_probs, value_states) + expected_shape = (batch_size, self.num_attn_heads, tgt_len, self.head_dim) + if attn_output.size() != expected_shape: + raise ValueError(f"`attn_output` should have shape {expected_shape}, but is of shape {attn_output.size()}") + + attn_output = attn_output.transpose(1, 2).reshape(batch_size, tgt_len, hidden_size) + attn_output = self.out_proj(attn_output) + + attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) + return attn_output, attn_weights_reshaped, past_key_value + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetFeedForward with ProphetNet->XLMProphetNet +class XLMProphetNetFeedForward(nn.Module): + 
""" + This is the residual two feed-forward layer block based on the original Transformer implementation. + """ + + def __init__(self, config: XLMProphetNetConfig, ffn_dim: int): + super().__init__() + self.activation_fn = ACT2FN[config.activation_function] + self.intermediate = nn.Linear(config.hidden_size, ffn_dim) + self.output = nn.Linear(ffn_dim, config.hidden_size) + self.activation_dropout = config.activation_dropout + self.dropout = config.dropout + + def forward(self, hidden_states): + hidden_states = self.intermediate(hidden_states) + hidden_states = self.activation_fn(hidden_states) + + hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) + hidden_states = self.output(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + return hidden_states + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetNgramSelfAttention with ProphetNet->XLMProphetNet +class XLMProphetNetNgramSelfAttention(nn.Module): + def __init__(self, config: XLMProphetNetConfig): + super().__init__() + self.hidden_size = config.hidden_size + + self.num_buckets = config.num_buckets + self.relative_max_distance = config.relative_max_distance + self.num_attn_heads = config.num_decoder_attention_heads + self.dropout = config.dropout + self.attention_dropout = config.attention_dropout + self.head_dim = config.hidden_size // self.num_attn_heads + self.ngram = config.ngram + + assert ( + self.head_dim * self.num_attn_heads == config.hidden_size + ), "config.hidden_size must be divisible by num_attn_heads" + # key, value, query projection + self.key_proj = nn.Linear(config.hidden_size, config.hidden_size) + self.value_proj = nn.Linear(config.hidden_size, config.hidden_size) + self.query_proj = nn.Linear(config.hidden_size, config.hidden_size) + + # out projection + self.out_proj = nn.Linear(config.hidden_size, config.hidden_size) + + # rel position embeddings + self.relative_pos_embeddings = nn.Linear(config.hidden_size, self.num_buckets * self.num_attn_heads) + + # for onnx runtime + self.onnx_trace = False + + def _shape(self, tensor, seq_len, batch_size): + return tensor.view(batch_size, seq_len, self.num_attn_heads, self.head_dim).transpose(1, 2).contiguous() + + def prepare_for_onnx_export_(self): + self.onnx_trace = True + + def forward( + self, + hidden_states, + past_key_value: Optional[Tuple[Tensor]] = None, + attention_mask=None, + layer_head_mask=None, + extended_predict_attention_mask=None, + main_relative_position_buckets=None, + predict_relative_position_buckets=None, + position_ids=None, + ): + batch_size, ngram_sequence_length, hidden_size = hidden_states.size() + assert list(hidden_states.size()) == [batch_size, ngram_sequence_length, hidden_size], ( + f"`hidden_states` should be of shape {batch_size, ngram_sequence_length, hidden_size}, but is of shape" + f" {hidden_states.shape}" + ) + + # project + query_states = self.query_proj(hidden_states) + key_states = self.key_proj(hidden_states) + value_states = self.value_proj(hidden_states) + + # normalize + query_states = query_states / (self.head_dim**0.5) + + # reshape + query_states = self._shape(query_states, ngram_sequence_length, batch_size) + key_states = self._shape(key_states, -1, batch_size) + value_states = self._shape(value_states, -1, batch_size) + proj_shape = (batch_size, self.num_attn_heads, -1, self.head_dim) + + query_states = query_states.view(*proj_shape) + key_states = key_states.view(*proj_shape) + 
value_states = value_states.view(*proj_shape) + + # chunk into main stream and predict stream + hidden_states_list = hidden_states.chunk(1 + self.ngram, dim=1) + query_states_list = query_states.chunk(1 + self.ngram, dim=2) + key_states_list = key_states.chunk(1 + self.ngram, dim=2) + value_states_list = value_states.chunk(1 + self.ngram, dim=2) + + main_hidden_states, hidden_states_predict_list = hidden_states_list[0], hidden_states_list[1:] + main_query_states, predict_query_states_list = query_states_list[0], query_states_list[1:] + main_key_states, predict_key_states_list = key_states_list[0], key_states_list[1:] + main_value_states, predict_value_states_list = value_states_list[0], value_states_list[1:] + + # saved states are stored with shape (batch_size, num_attn_heads, seq_len, head_dim) + if past_key_value is not None: + prev_main_key_states = past_key_value[0] + main_key_states = torch.cat((prev_main_key_states, main_key_states), dim=2) + prev_main_value_states = past_key_value[1] + main_value_states = torch.cat((prev_main_value_states, main_value_states), dim=2) + + # Update cache + past_key_value = (main_key_states, main_value_states) + + # get seq_length of main stream only + sequence_length = ngram_sequence_length // (1 + self.ngram) + + # MAIN-STREAM + # main attn weights + # [batch_size, number_heads, sequence_length, head_dimesion] + # x [batch_size, number_heads, head_dimesion, sequence_length] + # -> [batch_size, number_heads, sequence_length, sequence_length] + main_attn_weights = torch.einsum("bntc,bncs->bnts", main_query_states, main_key_states.transpose(2, 3)) + + # retrieve relative position embeddings for each layer -> see paper for more details + main_relative_pos_embeddings = self.get_main_relative_pos_embeddings( + main_hidden_states, main_attn_weights, position_ids, main_relative_position_buckets + ) + + main_attn_weights = main_attn_weights + main_relative_pos_embeddings + + if attention_mask is not None: + main_attn_weights = main_attn_weights + attention_mask + + main_attn_probs = softmax( + main_attn_weights, + dim=-1, + onnx_trace=self.onnx_trace, + ).type_as(main_attn_weights) + + if layer_head_mask is not None: + assert layer_head_mask.size() == (self.num_attn_heads,), ( + f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + main_attn_probs = layer_head_mask.view(1, -1, 1, 1) * main_attn_probs.view( + batch_size, self.num_attn_heads, -1, sequence_length + ) + + main_attn_probs = nn.functional.dropout(main_attn_probs, p=self.attention_dropout, training=self.training) + # project to attn_output + # [batch_size, number_heads, sequence_length, sequence_length] + # x [batch_size, number_heads, sequence_length, head_dimesion] + # -> [batch_size, number_heads, sequence_length, head_dimesion] + main_attn_output = torch.einsum("bntc,bncs->bnts", main_attn_probs, main_value_states) + # reshape so that num_heads dim is merged into last `head_dim` axis + main_attn_output = main_attn_output.transpose(1, 2).reshape(batch_size, 1, sequence_length, hidden_size) + main_attn_output = self.out_proj(main_attn_output) + + # PREDICT-STREAM + # [batch_size, ngram, number_heads, sequence_length, head_dimesion] + predict_query_states = torch.stack(predict_query_states_list, 1).view( + batch_size, self.ngram, self.num_attn_heads, sequence_length, self.head_dim + ) + + # [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion] + predict_key_states = torch.stack([torch.cat([main_key_states, key], 2) for 
key in predict_key_states_list], 1) + + # [batch_size, sequence_length, ngram, hidden_size] + predict_hidden_states = torch.stack(hidden_states_predict_list, dim=2) + + # [batch_size, number_heads, ngram, 2*sequence_length, head_dimesion] + predict_value_states = torch.cat( + [torch.cat([main_value_states, v_p], 2).unsqueeze(2) for v_p in predict_value_states_list], 2 + ) + + # [batch_size, ngram, number_heads, sequence_length, head_dimesion] + # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion] + # -> [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] + predict_attn_weights = torch.einsum("bnhtc,bnhsc->bnhts", (predict_query_states, predict_key_states)) + + # retrieve relative position embeddings for each layer -> see paper for more details + # [batch_size, ngram, number_heads, sequence_length, predict_relative_pos_embeddings] + predict_relative_pos_embeddings = self.get_predict_relative_pos_embeddings( + predict_hidden_states, predict_attn_weights, position_ids, predict_relative_position_buckets + ) + + # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] + predict_attn_weights = predict_attn_weights + predict_relative_pos_embeddings + + if extended_predict_attention_mask is not None: + # Permuting Predict attention mask to [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] + extended_predict_attention_mask = extended_predict_attention_mask.permute(0, 2, 1, 3, 4) + extended_predict_attention_mask = extended_predict_attention_mask.to(predict_attn_weights.dtype) + predict_attn_weights = predict_attn_weights + extended_predict_attention_mask + + predict_attn_probs = softmax( + predict_attn_weights, + dim=-1, + onnx_trace=self.onnx_trace, + ).type_as(predict_attn_weights) + + if layer_head_mask is not None: + assert layer_head_mask.size() == (self.num_attn_heads,), ( + f"Head mask for a single layer should be of size {(self.num_attn_heads,)}, but is" + f" {layer_head_mask.size()}" + ) + predict_attn_probs = layer_head_mask.view(1, 1, -1, 1, 1) * predict_attn_probs + + predict_attn_probs = nn.functional.dropout( + predict_attn_probs, p=self.attention_dropout, training=self.training + ) + # project to attention output + # [batch_size, ngram, number_heads, sequence_length, 2*sequence_length] + # x [batch_size, ngram, number_heads, 2*sequence_length, head_dimesion] + # -> [batch_size, ngram, number_heads, sequence_length, head_dimesion] + predict_attn_output = torch.einsum( + "bnhts,bnhsc->bnhtc", (predict_attn_probs, predict_value_states.transpose(1, 2)) + ) + + # reshape so that num_heads dim is merged into last `head_dim` axis + # [batch_size, ngram, number_heads, sequence_length, head_dimesion] -> [batch_size, ngram, sequence_length, hidden_size] + predict_attn_output = predict_attn_output.transpose(2, 3) + predict_attn_output = predict_attn_output.reshape(batch_size, self.ngram, sequence_length, hidden_size) + predict_attn_output = self.out_proj(predict_attn_output) + + # concat to single attn output + # [batch_size, (1+ngram)*sequence_length, hidden_size] + attn_output = torch.cat([main_attn_output, predict_attn_output], 1).view(batch_size, -1, hidden_size) + # reshape into better form for `config.output_attentions` + main_attn_probs = main_attn_probs.view(batch_size, self.num_attn_heads, sequence_length, -1) + + attn_output = nn.functional.dropout(attn_output, p=self.dropout, training=self.training) + + return attn_output, main_attn_probs, predict_attn_probs, past_key_value + + def 
get_main_relative_pos_embeddings( + self, hidden_states, attn_weights, position_ids, main_relative_position_buckets + ): + # input hidden_states [batch_size, sequence_length, hidden_size] + # input attn_weights [batch_size, num_heads, sequence_length, sequence_length] + # input position_ids [batch_size, sequence_length] or [1,1] + batch_size, num_attn_heads, tgt_len, src_len = attn_weights.shape + attn_weights = attn_weights.view(batch_size, num_attn_heads, tgt_len, src_len) + if main_relative_position_buckets is None: + batch_size, sequence_length = hidden_states.shape[:2] + relative_positions = ( + torch.arange(1, attn_weights.shape[-1] + 1) + .unsqueeze(0) + .unsqueeze(0) + .repeat(batch_size, sequence_length, 1) + .to(position_ids.device) + ) + # [batch_size, sequence_length, sequence_length+1] + relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) + main_relative_position_buckets = compute_relative_buckets( + self.num_buckets, self.relative_max_distance, relative_positions, False + ) + + # [batch_size, sequence_length, num_buckets * num_heads] + rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) + rel_pos_embeddings = rel_pos_embeddings.view( + rel_pos_embeddings.shape[:2] + (self.num_buckets, self.num_attn_heads) + ) + rel_pos_embeddings = rel_pos_embeddings.permute(0, 3, 1, 2) + # [batch_size, num_heads, sequence_length, num_buckets] + rel_pos_embeddings = rel_pos_embeddings.reshape(attn_weights.shape[:3] + (-1,)) + + main_relative_position_buckets = main_relative_position_buckets.repeat(1, self.num_attn_heads, 1) + # [batch_size * num_heads * sequence_length, sequence_length] + main_relative_position_buckets = main_relative_position_buckets.view( + -1, main_relative_position_buckets.shape[-1] + ) + main_relative_position_buckets = main_relative_position_buckets.long() + # [batch_size * num_heads * sequence_length, sequence_length] + rel_pos_embeddings = rel_pos_embeddings.reshape(-1, rel_pos_embeddings.size(-1)) + + main_relative_pos_embeddings = torch.gather(rel_pos_embeddings, dim=1, index=main_relative_position_buckets) + main_relative_pos_embeddings = main_relative_pos_embeddings.view(batch_size, num_attn_heads, tgt_len, -1) + return main_relative_pos_embeddings + + def get_predict_relative_pos_embeddings( + self, hidden_states, attn_weights, position_ids, predict_relative_position_buckets + ): + # input hidden_states [batch_size, sequence_length, ngram, hidden_size] + # input attn_weights [batch_size, ngram, num_heads, sequence_length, 2*sequence_length] + # input position_ids [batch_size, sequence_length] or [1,1] + # input predict_relative_position_buckets [batch_size, sequence_length, 2*sequence_length] or None + batch_size, sequence_length = hidden_states.shape[0:2] + + if predict_relative_position_buckets is None: + key_sequence_length = attn_weights.shape[-1] + assert ( + position_ids[0][0] == key_sequence_length - 1 + ), "`position_ids` are incorrect. They should be of the format 1 2 3 4 5 ... 
(key_sequence_length - 1)" + relative_positions = ( + torch.arange(0, key_sequence_length) + .unsqueeze(0) + .unsqueeze(0) + .repeat(batch_size, sequence_length, 1) + .to(position_ids.device) + ) + + relative_positions = relative_positions - position_ids.unsqueeze(0).repeat(batch_size, sequence_length, 1) + predict_relative_position_buckets = compute_relative_buckets( + self.num_buckets, self.relative_max_distance, relative_positions, False + ) + + # [batch_size, ngram, sequence_length, hidden_size] + hidden_states = hidden_states.transpose(1, 2) + rel_pos_embeddings = self.relative_pos_embeddings(hidden_states) + + # [batch_size, ngram, sequence_length, num_buckets, num_heads] + rel_pos_embeddings = rel_pos_embeddings.view( + hidden_states.shape[:-1] + (self.num_buckets, self.num_attn_heads) + ) + rel_pos_embeddings = rel_pos_embeddings.permute(0, 2, 1, 4, 3) + # [batch_size * ngram * sequence_length * num_heads, num_buckets] + rel_pos_embeddings = rel_pos_embeddings.reshape(-1, self.num_buckets) + # [ngram, batch_size, num_heads * sequence_length, -1] + predict_relative_position_buckets = predict_relative_position_buckets.unsqueeze(0) + predict_relative_position_buckets = predict_relative_position_buckets.repeat( + self.ngram, 1, self.num_attn_heads, 1 + ) + # [ngram * batch_size * num_heads * sequence_length, -1] + predict_relative_position_buckets = predict_relative_position_buckets.view( + -1, predict_relative_position_buckets.size(-1) + ).long() + + predict_relative_pos_embeddings = torch.gather( + rel_pos_embeddings, dim=1, index=predict_relative_position_buckets + ) + + # [batch_size, gram, num_heads, sequence_length, -1] + predict_relative_pos_embeddings = predict_relative_pos_embeddings.view( + batch_size, self.ngram, self.num_attn_heads, sequence_length, -1 + ) + + return predict_relative_pos_embeddings + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoderLayer with ProphetNet->XLMProphetNet, Prophetnet->XLMProphetnet +class XLMProphetNetEncoderLayer(nn.Module): + """ + Encoder block for XLMProphetnet + """ + + def __init__(self, config: XLMProphetNetConfig): + super().__init__() + # 1st residual block + self.self_attn = XLMProphetNetAttention(config, config.num_encoder_attention_heads) + self.self_attn_layer_norm = LayerNorm(config.hidden_size) + + # 2nd residual block + self.feed_forward = XLMProphetNetFeedForward(config, config.encoder_ffn_dim) + self.feed_forward_layer_norm = LayerNorm(config.hidden_size) + + def forward( + self, + hidden_states, + attention_mask, + layer_head_mask, + output_attentions: bool = False, + ): + # 1st residual block + attention_output, attn_weights, _ = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + output_attentions=output_attentions, + ) + hidden_states = self.self_attn_layer_norm(attention_output + hidden_states) + + # 2nd residual block + feed_forward_output = self.feed_forward(hidden_states) + hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderLayer with Prophetnet->XLMProphetnet, ProphetNet->XLMProphetNet +class XLMProphetNetDecoderLayer(nn.Module): + """ + Decoder block for XLMProphetnet + """ + + def __init__(self, config: XLMProphetNetConfig): + super().__init__() + # 1st residual block + self.self_attn = 
XLMProphetNetNgramSelfAttention(config) + self.self_attn_layer_norm = LayerNorm(config.hidden_size) + + # 2nd residual block + if config.add_cross_attention: + self.cross_attn = XLMProphetNetAttention(config, config.num_decoder_attention_heads) + self.cross_attn_layer_norm = LayerNorm(config.hidden_size) + + # 3rd residual block + self.feed_forward = XLMProphetNetFeedForward(config, config.decoder_ffn_dim) + self.feed_forward_layer_norm = LayerNorm(config.hidden_size) + + def forward( + self, + hidden_states, + attention_mask=None, + encoder_hidden_states=None, + encoder_attn_mask=None, + layer_head_mask=None, + cross_attn_layer_head_mask=None, + extended_predict_attention_mask=None, + main_relative_position_buckets=None, + predict_relative_position_buckets=None, + position_ids=None, + past_key_value=None, + use_cache: bool = True, + output_attentions: bool = False, + ): + # 1st residual block + # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 + self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None + ngram_attention_output, self_attn_weights, self_attn_weights_ngram, present_key_value = self.self_attn( + hidden_states=hidden_states, + past_key_value=self_attn_past_key_value, + attention_mask=attention_mask, + layer_head_mask=layer_head_mask, + extended_predict_attention_mask=extended_predict_attention_mask, + main_relative_position_buckets=main_relative_position_buckets, + predict_relative_position_buckets=predict_relative_position_buckets, + position_ids=position_ids, + ) + hidden_states = self.self_attn_layer_norm(hidden_states + ngram_attention_output) + + # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple + cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None + cross_attn_weights = None + if encoder_hidden_states is not None: + # 2nd residual block + attention_output, cross_attn_weights, cross_attn_present_key_value = self.cross_attn( + hidden_states=hidden_states, + key_value_states=encoder_hidden_states, + attention_mask=encoder_attn_mask, + layer_head_mask=cross_attn_layer_head_mask, + past_key_value=cross_attn_past_key_value, + output_attentions=output_attentions, + ) + hidden_states = self.cross_attn_layer_norm(attention_output + hidden_states) + + # add cross-attn to positions 3,4 of present_key_value tuple + present_key_value = present_key_value + cross_attn_present_key_value + + # 3rd residual block + feed_forward_output = self.feed_forward(hidden_states) + hidden_states = self.feed_forward_layer_norm(feed_forward_output + hidden_states) + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights, self_attn_weights_ngram, cross_attn_weights) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +@add_start_docstrings( + "The standalone encoder part of the XLMProphetNetModel.", + XLM_PROPHETNET_START_DOCSTRING, +) +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetEncoder with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET +class XLMProphetNetEncoder(XLMProphetNetPreTrainedModel): + r""" + word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): + The word embedding parameters. This can be used to initialize [`XLMProphetNetEncoder`] with pre-defined word + embeddings instead of randomly initialized word embeddings. 
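        A minimal construction sketch (illustrative only; the shared embedding table and default config below are
        assumptions, not taken from an official example):

        ```python
        >>> import torch.nn as nn
        >>> from transformers import XLMProphetNetConfig, XLMProphetNetEncoder

        >>> config = XLMProphetNetConfig()  # randomly initialized weights, default hyper-parameters
        >>> # an externally created embedding table that the encoder reuses instead of creating its own
        >>> shared_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        >>> encoder = XLMProphetNetEncoder(config, word_embeddings=shared_embeddings)
        ```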
+ """ + + def __init__(self, config: XLMProphetNetConfig, word_embeddings: nn.Embedding = None): + super().__init__(config) + + self.word_embeddings = ( + word_embeddings + if word_embeddings is not None + else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + ) + self.position_embeddings = XLMProphetNetPositionalEmbeddings(config) + self.embeddings_layer_norm = LayerNorm(config.hidden_size) + + self.layers = nn.ModuleList([XLMProphetNetEncoderLayer(config) for _ in range(config.num_encoder_layers)]) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.word_embeddings + + def set_input_embeddings(self, value): + self.word_embeddings = value + + @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=BaseModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, XLMProphetNetEncoder + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> model = XLMProphetNetEncoder.from_pretrained("patrickvonplaten/prophetnet-large-uncased-standalone") + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ```""" + + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None and inputs_embeds is None: + raise ValueError("Either input_ids or inputs_embeds has to be passed.") + elif input_ids is not None and inputs_embeds is not None: + raise ValueError("Make sure to only pass input_ids or inputs_embeds.") + elif input_ids is not None and inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + # prepare attention mask + if attention_mask is not None: + extended_attention_mask = ( + 1.0 - attention_mask[:, None, None, :].repeat(1, self.config.num_encoder_attention_heads, 1, 1) + ) * torch.finfo(self.dtype).min + extended_attention_mask = extended_attention_mask.to(inputs_embeds.dtype) + else: + extended_attention_mask = None + + position_embeddings, position_ids = self.position_embeddings(inputs_embeds.shape[:2], inputs_embeds.device) + + hidden_states = inputs_embeds + position_embeddings + hidden_states = self.embeddings_layer_norm(hidden_states) + hidden_states = nn.functional.dropout(hidden_states, p=self.config.dropout, training=self.training) + + encoder_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + # check if head_mask has a correct number of layers specified if desired + if head_mask is not None: + assert head_mask.size()[0] == ( + len(self.layers) + ), f"The head_mask should be specified for 
{len(self.layers)} layers, but it is for {head_mask.size()[0]}." + for idx, encoder_layer in enumerate(self.layers): + if output_hidden_states: + encoder_hidden_states = encoder_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + encoder_layer.__call__, + hidden_states, + extended_attention_mask, + (head_mask[idx] if head_mask is not None else None), + output_attentions, + ) + else: + layer_outputs = encoder_layer( + hidden_states, + attention_mask=extended_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + if output_hidden_states: + encoder_hidden_states = encoder_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, encoder_hidden_states, all_attentions] if v is not None) + return BaseModelOutput( + last_hidden_state=hidden_states, hidden_states=encoder_hidden_states, attentions=all_attentions + ) + + +@add_start_docstrings( + "The standalone decoder part of the XLMProphetNetModel.", + XLM_PROPHETNET_START_DOCSTRING, +) +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoder with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET, +class XLMProphetNetDecoder(XLMProphetNetPreTrainedModel): + r""" + word_embeddings (`torch.nn.Embeddings` of shape `(config.vocab_size, config.hidden_size)`, *optional*): + The word embedding parameters. This can be used to initialize [`XLMProphetNetEncoder`] with pre-defined word + embeddings instead of randomly initialized word embeddings. 
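        A rough sketch of standalone use (illustrative only; the config flags and input values are assumptions):
        the decoder exposes both the main stream and the n-gram predict stream.

        ```python
        >>> import torch
        >>> from transformers import XLMProphetNetConfig, XLMProphetNetDecoder

        >>> config = XLMProphetNetConfig(is_decoder=True, add_cross_attention=False)
        >>> decoder = XLMProphetNetDecoder(config)  # randomly initialized, shown here only to inspect output shapes
        >>> outputs = decoder(input_ids=torch.tensor([[12, 34, 56]]))
        >>> main_stream = outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)
        >>> predict_stream = outputs.last_hidden_state_ngram  # (batch_size, ngram * sequence_length, hidden_size)
        ```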
+ """ + + def __init__(self, config: XLMProphetNetConfig, word_embeddings: Optional[nn.Embedding] = None): + super().__init__(config) + + self.ngram = config.ngram + self.num_buckets = config.num_buckets + self.relative_max_distance = config.relative_max_distance + self.dropout = config.dropout + self.max_target_positions = config.max_position_embeddings + + self.word_embeddings = ( + word_embeddings + if word_embeddings is not None + else nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + ) + self.position_embeddings = XLMProphetNetPositionalEmbeddings(config) + + self.ngram_embeddings = nn.Embedding(self.ngram, config.hidden_size, None) + self.layers = nn.ModuleList([XLMProphetNetDecoderLayer(config) for _ in range(config.num_decoder_layers)]) + self.embeddings_layer_norm = LayerNorm(config.hidden_size) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.word_embeddings + + def set_input_embeddings(self, value): + self.word_embeddings = value + + @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=XLMProphetNetDecoderModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, XLMProphetNetDecoderModelOutput]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
+ use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, XLMProphetNetDecoder + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> model = XLMProphetNetDecoder.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone", add_cross_attention=False) + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> last_hidden_states = outputs.last_hidden_state + ```""" + use_cache = use_cache if use_cache is not None else self.config.use_cache + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is None and inputs_embeds is None: + raise ValueError("Either `decoder_input_ids` or `decoder_inputs_embeds` has to be passed.") + elif input_ids is not None and inputs_embeds is not None: + raise ValueError("Make sure to only pass `decoder_input_ids` or `decoder_inputs_embeds`.") + elif input_ids is not None and inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + + batch_size, sequence_length = inputs_embeds.shape[:2] + + main_stream_pos_embed, position_ids = self.position_embeddings( + (batch_size, sequence_length), + device=inputs_embeds.device, + past_key_values=past_key_values, + ) + + if past_key_values is not None: + main_relative_position_buckets, predict_relative_position_buckets = None, None + else: + ( + main_relative_position_buckets, + predict_relative_position_buckets, + ) = self.compute_buffered_relative_buckets(position_ids) + predicting_stream_pos_embed = self.position_embeddings._forward(position_ids + 1) + + # add position embeddings + hidden_states = inputs_embeds + main_stream_pos_embed + + ngram_embeddings = self.ngram_embeddings.weight + + # prepare attention mask + if past_key_values is not None: + assert ( + hidden_states.size(1) == 1 + ), "At the moment `use_cache` is only supported for `decoder_input_ids` of length 1" + + ngram_hidden_states = [ + (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed).repeat(batch_size, 1, 1) + for ngram in range(self.ngram) + ] + extended_attention_mask = None + extended_predict_attention_mask = None + else: + ngram_hidden_states = [ + (ngram_embeddings[ngram - 1] + predicting_stream_pos_embed) for ngram in range(self.ngram) + ] + extended_attention_mask = self.prepare_attention_mask(hidden_states, attention_mask) + extended_predict_attention_mask = self.prepare_predict_attention_mask(hidden_states, attention_mask) + + # prepare encoder attention mask + if encoder_attention_mask is not None: + extended_encoder_attention_mask = ( + 1.0 - encoder_attention_mask[:, None, None, :].repeat(1, self.config.num_decoder_attention_heads, 1, 1) + ) * torch.finfo(self.dtype).min + extended_encoder_attention_mask = extended_encoder_attention_mask.to(inputs_embeds.dtype) + else: + extended_encoder_attention_mask = None + + hidden_states = torch.cat([hidden_states] + ngram_hidden_states, 1) + + if self.embeddings_layer_norm: + hidden_states 
= self.embeddings_layer_norm(hidden_states) + + hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) + + # init attentions, hidden_states and cache with empty tuples + all_main_stream_hidden_states = () if output_hidden_states else None + all_ngram_stream_hidden_states = () if output_hidden_states and self.config.ngram > 0 else None + + all_main_stream_attns = () if output_attentions else None + all_ngram_stream_attns = () if output_attentions else None + all_cross_attns = () if output_attentions and self.config.add_cross_attention else None + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + present_key_values = () if use_cache else None + + # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired + for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): + if attn_mask is not None: + assert attn_mask.size()[0] == (len(self.layers)), ( + f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" + f" {head_mask.size()[0]}." + ) + for idx, decoder_layer in enumerate(self.layers): + if output_hidden_states: + # grad cannot be kept because tensor is sliced + all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) + if self.config.ngram > 0: + all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) + + past_key_value = past_key_values[idx] if past_key_values is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + extended_attention_mask, + encoder_hidden_states, + extended_encoder_attention_mask, + (head_mask[idx] if head_mask is not None else None), + (cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None), + extended_predict_attention_mask, + main_relative_position_buckets, + predict_relative_position_buckets, + position_ids, + None, + use_cache, + output_attentions, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=extended_attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attn_mask=extended_encoder_attention_mask, + layer_head_mask=(head_mask[idx] if head_mask is not None else None), + cross_attn_layer_head_mask=( + cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None + ), + extended_predict_attention_mask=extended_predict_attention_mask, + main_relative_position_buckets=main_relative_position_buckets, + predict_relative_position_buckets=predict_relative_position_buckets, + position_ids=position_ids, + past_key_value=past_key_value, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + present_key_values += (layer_outputs[4 if output_attentions else 1],) + + if output_attentions: + all_main_stream_attns += (layer_outputs[1],) + all_ngram_stream_attns += (layer_outputs[2],) + + if self.config.add_cross_attention: + all_cross_attns += (layer_outputs[3],) + + if output_hidden_states: + all_main_stream_hidden_states += (hidden_states[:, :sequence_length],) + if self.config.ngram > 0: + all_ngram_stream_hidden_states += (hidden_states[:, sequence_length:],) + + # split last_hidden_state for return + last_hidden_state = hidden_states[:, :sequence_length] + last_hidden_state_ngram = 
hidden_states[:, sequence_length:] if self.config.ngram > 0 else None + + if not return_dict: + return tuple( + v + for v in [ + last_hidden_state, + last_hidden_state_ngram, + present_key_values, + all_main_stream_hidden_states, + all_ngram_stream_hidden_states, + all_main_stream_attns, + all_ngram_stream_attns, + all_cross_attns, + ] + if v is not None + ) + return XLMProphetNetDecoderModelOutput( + last_hidden_state=last_hidden_state, + last_hidden_state_ngram=last_hidden_state_ngram, + past_key_values=present_key_values, + hidden_states=all_main_stream_hidden_states, + hidden_states_ngram=all_ngram_stream_hidden_states, + attentions=all_main_stream_attns, + ngram_attentions=all_ngram_stream_attns, + cross_attentions=all_cross_attns, + ) + + def compute_buffered_relative_buckets(self, position_ids): + batch_size, sequence_length = position_ids.shape + + position_ids = torch.arange(1, self.max_target_positions).to(position_ids.device).repeat(1, 1) + main_relative_buckets, predict_relative_buckets = compute_all_stream_relative_buckets( + self.num_buckets, self.relative_max_distance, position_ids + ) + + # buffer relative buckets + main_relative_buckets = main_relative_buckets[:, :sequence_length, :sequence_length].repeat(batch_size, 1, 1) + predict_relative_buckets = torch.cat( + [ + predict_relative_buckets[:, :sequence_length, :sequence_length], + predict_relative_buckets[ + :, :sequence_length, self.max_target_positions : self.max_target_positions + sequence_length + ], + ], + 2, + ).repeat(batch_size, 1, 1) + + return main_relative_buckets, predict_relative_buckets + + def prepare_attention_mask(self, hidden_states, attention_mask): + batch_size, seq_length = hidden_states.shape[:2] + + # get causal mask + causal_mask = torch.full( + (seq_length, seq_length), + torch.finfo(hidden_states.dtype).min, + dtype=hidden_states.dtype, + device=hidden_states.device, + ) + causal_mask = torch.triu(causal_mask, 1) + + extended_causal_mask = causal_mask[:seq_length, :seq_length][None, None, :, :].expand( + (batch_size, self.config.num_decoder_attention_heads) + causal_mask.shape + ) + + # add usual attention mask + if attention_mask is not None: + extended_attention_mask = (1.0 - attention_mask[:, None, None, :]) * torch.finfo(self.dtype).min + extended_attention_mask = extended_causal_mask + extended_attention_mask + else: + extended_attention_mask = extended_causal_mask + return extended_attention_mask.to(hidden_states.dtype) + + def prepare_predict_attention_mask(self, hidden_states, attention_mask): + batch_size, seq_length = hidden_states.shape[:2] + + # get causal mask + predict_causal_mask = ngram_attention_bias( + self.max_target_positions, self.ngram, hidden_states.device, hidden_states.dtype + ) + predict_causal_mask = torch.cat( + [ + predict_causal_mask[:, :seq_length, :seq_length], + predict_causal_mask[ + :, :seq_length, self.max_target_positions : self.max_target_positions + seq_length + ], + ], + dim=-1, + ) + extended_predict_causal_mask = predict_causal_mask[None, None, :, :, :].expand( + (batch_size, self.config.num_decoder_attention_heads) + predict_causal_mask.shape + ) + + # add usual attention mask + if attention_mask is not None: + extended_attention_mask = (1.0 - attention_mask[:, None, None, None, :]) * torch.finfo(self.dtype).min + extended_attention_mask = extended_attention_mask.expand( + (batch_size, self.config.num_decoder_attention_heads, self.ngram, seq_length, seq_length) + ) + # predicted stream attention_mask should always be 0 + extended_attention_mask = 
torch.cat( + [extended_attention_mask, torch.zeros_like(extended_attention_mask)], dim=-1 + ) + extended_predict_attention_mask = extended_predict_causal_mask + extended_attention_mask + else: + extended_predict_attention_mask = extended_predict_causal_mask + return extended_predict_attention_mask.to(hidden_states.dtype) + + +@add_start_docstrings( + "The bare XLMProphetNet Model outputting raw hidden-states without any specific head on top.", + XLM_PROPHETNET_START_DOCSTRING, +) +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetModel with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET +class XLMProphetNetModel(XLMProphetNetPreTrainedModel): + _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight"] + + def __init__(self, config: XLMProphetNetConfig): + super().__init__(config) + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + + encoder_config = copy.deepcopy(config) + encoder_config.is_encoder_decoder = False + encoder_config.use_cache = False + self.encoder = XLMProphetNetEncoder(encoder_config, self.word_embeddings) + + decoder_config = copy.deepcopy(config) + decoder_config.is_decoder = True + decoder_config.is_encoder_decoder = False + self.decoder = XLMProphetNetDecoder(decoder_config, self.word_embeddings) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.word_embeddings + + def set_input_embeddings(self, value): + self.word_embeddings = value + self.encoder.word_embeddings = self.word_embeddings + self.decoder.word_embeddings = self.word_embeddings + + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.encoder.word_embeddings, self.word_embeddings) + self._tie_or_clone_weights(self.decoder.word_embeddings, self.word_embeddings) + + def get_encoder(self): + return self.encoder + + def get_decoder(self): + return self.decoder + + @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.Tensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[Tuple] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + decoder_inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, XLMProphetNetSeq2SeqModelOutput]: + r""" + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, XLMProphetNetModel + + >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> model = XLMProphetNetModel.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + + >>> input_ids = tokenizer( + ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" + ... 
).input_ids # Batch size 1 + >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 + >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) + + >>> last_hidden_states = outputs.last_hidden_state # main stream hidden states + >>> last_hidden_states_ngram = outputs.last_hidden_state_ngram # predict hidden states + ```""" + use_cache = use_cache if use_cache is not None else self.config.use_cache + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if encoder_outputs is None: + encoder_outputs = self.encoder( + input_ids=input_ids, + attention_mask=attention_mask, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) + decoder_outputs = self.decoder( + input_ids=decoder_input_ids, + attention_mask=decoder_attention_mask, + encoder_hidden_states=encoder_outputs[0], + encoder_attention_mask=attention_mask, + head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=decoder_inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + use_cache=use_cache, + return_dict=return_dict, + ) + + if not return_dict: + return decoder_outputs + encoder_outputs + return XLMProphetNetSeq2SeqModelOutput( + last_hidden_state=decoder_outputs.last_hidden_state, + last_hidden_state_ngram=decoder_outputs.last_hidden_state_ngram, + past_key_values=decoder_outputs.past_key_values, + decoder_hidden_states=decoder_outputs.hidden_states, + decoder_ngram_hidden_states=decoder_outputs.hidden_states_ngram, + decoder_attentions=decoder_outputs.attentions, + decoder_ngram_attentions=decoder_outputs.ngram_attentions, + cross_attentions=decoder_outputs.cross_attentions, + encoder_last_hidden_state=encoder_outputs.last_hidden_state, + encoder_hidden_states=encoder_outputs.hidden_states, + encoder_attentions=encoder_outputs.attentions, + ) + + +@add_start_docstrings( + "The XLMProphetNet Model with a language modeling head. 
Can be used for sequence generation tasks.", + XLM_PROPHETNET_START_DOCSTRING, +) +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForConditionalGeneration with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET +class XLMProphetNetForConditionalGeneration(XLMProphetNetPreTrainedModel): + _tied_weights_keys = ["encoder.word_embeddings.weight", "decoder.word_embeddings.weight", "lm_head.weight"] + + def __init__(self, config: XLMProphetNetConfig): + super().__init__(config) + self.prophetnet = XLMProphetNetModel(config) + self.padding_idx = config.pad_token_id + self.disable_ngram_loss = config.disable_ngram_loss + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.prophetnet.word_embeddings, self.lm_head) + + def get_input_embeddings(self): + return self.prophetnet.word_embeddings + + @add_start_docstrings_to_model_forward(XLM_PROPHETNET_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=XLMProphetNetSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + decoder_input_ids: Optional[torch.Tensor] = None, + decoder_attention_mask: Optional[torch.BoolTensor] = None, + head_mask: Optional[torch.Tensor] = None, + decoder_head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + encoder_outputs: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + inputs_embeds: Optional[torch.Tensor] = None, + decoder_inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, XLMProphetNetSeq2SeqLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., + config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for + labels in `[0, ..., config.vocab_size]` + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, XLMProphetNetForConditionalGeneration + + >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> model = XLMProphetNetForConditionalGeneration.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + + >>> input_ids = tokenizer( + ... "Studies have been shown that owning a dog is good for you", return_tensors="pt" + ... ).input_ids # Batch size 1 + >>> decoder_input_ids = tokenizer("Studies show that", return_tensors="pt").input_ids # Batch size 1 + >>> outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids) + + >>> logits_next_token = outputs.logits # logits to predict next token as usual + >>> logits_ngram_next_tokens = outputs.logits_ngram # logits to predict 2nd, 3rd, ... 
next tokens + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None: + # get decoder inputs from shifting lm labels to the right + decoder_input_ids = self._shift_right(labels) + + outputs = self.prophetnet( + input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + head_mask=head_mask, + decoder_head_mask=decoder_head_mask, + cross_attn_head_mask=cross_attn_head_mask, + encoder_outputs=encoder_outputs, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + decoder_inputs_embeds=decoder_inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + batch_size, sequence_length = ( + decoder_input_ids.shape if decoder_input_ids is not None else decoder_inputs_embeds.shape[:2] + ) + + predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) + predict_logits = self.lm_head(predicting_streams) + + logits = predict_logits[:, 0] + logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None + + # To use .view in loss computation, make sure that logits is contiguous. + if not logits.is_contiguous(): + logits = logits.contiguous() + + loss = None + if labels is not None: + loss = self._compute_loss(predict_logits, labels) + + if not return_dict: + all_logits = tuple(v for v in [logits, logits_ngram] if v is not None) + return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] + else: + return XLMProphetNetSeq2SeqLMOutput( + loss=loss, + logits=logits, + logits_ngram=logits_ngram, + past_key_values=outputs.past_key_values, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_ngram_hidden_states=outputs.decoder_ngram_hidden_states, + decoder_attentions=outputs.decoder_attentions, + decoder_ngram_attentions=outputs.decoder_ngram_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + def _compute_loss(self, logits, labels, ignore_index=-100): + expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) + + for i in range(self.config.ngram): + if i > 0 and self.disable_ngram_loss: + break + expend_targets[i, :, :] = labels + + logits = logits.transpose(0, 1).contiguous() + lprobs = nn.functional.log_softmax( + logits.view(-1, logits.size(-1)), + dim=-1, + dtype=torch.float32, + ) + + loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean") + + if self.config.eps > 0.0: + smooth_loss = -lprobs.sum(dim=-1, keepdim=True) + non_masked_tokens = expend_targets.ne(ignore_index).view(-1) + smooth_loss = smooth_loss[non_masked_tokens] + smooth_loss = smooth_loss.mean() + + eps_i = self.config.eps / lprobs.size(-1) + loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss + + return loss + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + past_key_values=None, + attention_mask=None, + head_mask=None, + decoder_head_mask=None, + cross_attn_head_mask=None, + use_cache=None, + encoder_outputs=None, + **kwargs, + ): + assert encoder_outputs is not None, "`encoder_outputs` have to be passed for generation." 
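        # When the cache already holds key/value states for earlier positions, only the most recent
        # decoder token needs to be re-encoded, so the input is cut down to its last position below.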
+ + if past_key_values: + decoder_input_ids = decoder_input_ids[:, -1:] + # first step, decoder_cached_states are empty + return { + "input_ids": None, # encoder_outputs is defined. input_ids not needed + "encoder_outputs": encoder_outputs, + "past_key_values": past_key_values, + "decoder_input_ids": decoder_input_ids, + "attention_mask": attention_mask, + "head_mask": head_mask, + "decoder_head_mask": decoder_head_mask, + "cross_attn_head_mask": cross_attn_head_mask, + "use_cache": use_cache, + } + + def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): + return self._shift_right(labels) + + @staticmethod + # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + # cached cross_attention states don't have to be reordered -> they are always the same + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past[:2]) + + layer_past[2:], + ) + return reordered_past + + def get_encoder(self): + return self.prophetnet.encoder + + def get_decoder(self): + return self.prophetnet.decoder + + +@add_start_docstrings( + "The standalone decoder part of the XLMProphetNetModel with a lm head on top. The model can be used for causal" + " language modeling.", + XLM_PROPHETNET_START_DOCSTRING, +) +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetForCausalLM with microsoft/prophetnet-large-uncased->patrickvonplaten/xprophetnet-large-uncased-standalone, ProphetNet->XLMProphetNet, PROPHETNET->XLM_PROPHETNET +class XLMProphetNetForCausalLM(XLMProphetNetPreTrainedModel): + _tied_weights_keys = [ + "prophetnet.word_embeddings.weight", + "prophetnet.decoder.word_embeddings.weight", + "lm_head.weight", + ] + + def __init__(self, config: XLMProphetNetConfig): + # set config for CLM + config = copy.deepcopy(config) + config.is_decoder = True + config.is_encoder_decoder = False + super().__init__(config) + self.prophetnet = XLMProphetNetDecoderWrapper(config) + + self.padding_idx = config.pad_token_id + self.disable_ngram_loss = config.disable_ngram_loss + + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.prophetnet.decoder.word_embeddings + + def set_input_embeddings(self, value): + self.prophetnet.decoder.word_embeddings = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def _tie_weights(self): + if self.config.tie_word_embeddings: + self._tie_or_clone_weights(self.prophetnet.decoder.word_embeddings, self.lm_head) + + def set_decoder(self, decoder): + self.prophetnet.decoder = decoder + + def get_decoder(self): + return self.prophetnet.decoder + + @add_start_docstrings_to_model_forward(XLM_PROPHETNET_STANDALONE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=XLMProphetNetDecoderLMOutput, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + cross_attn_head_mask: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + 
inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, XLMProphetNetDecoderLMOutput]: + r""" + encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if + the model is configured as a decoder. + encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in + the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: + cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the cross-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): + Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `decoder_input_ids` of shape `(batch_size, sequence_length)`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in + `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are + ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` + + Returns: + + Example: + + ```python + >>> from transformers import AutoTokenizer, XLMProphetNetForCausalLM + >>> import torch + + >>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> model = XLMProphetNetForCausalLM.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." + >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") + >>> outputs = model(**inputs) + + >>> logits = outputs.logits + + >>> # Model can also be used with EncoderDecoder framework + >>> from transformers import BertTokenizer, EncoderDecoderModel, AutoTokenizer + >>> import torch + + >>> tokenizer_enc = BertTokenizer.from_pretrained("google-bert/bert-large-uncased") + >>> tokenizer_dec = AutoTokenizer.from_pretrained("patrickvonplaten/xprophetnet-large-uncased-standalone") + >>> model = EncoderDecoderModel.from_encoder_decoder_pretrained( + ... "google-bert/bert-large-uncased", "patrickvonplaten/xprophetnet-large-uncased-standalone" + ... ) + + >>> ARTICLE = ( + ... 
"the us state department said wednesday it had received no " + ... "formal word from bolivia that it was expelling the us ambassador there " + ... "but said the charges made against him are `` baseless ." + ... ) + >>> input_ids = tokenizer_enc(ARTICLE, return_tensors="pt").input_ids + >>> labels = tokenizer_dec( + ... "us rejects charges against its ambassador in bolivia", return_tensors="pt" + ... ).input_ids + >>> outputs = model(input_ids=input_ids, decoder_input_ids=labels[:, :-1], labels=labels[:, 1:]) + + >>> loss = outputs.loss + ```""" + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, past_key_values, dec_hidden, dec_attn) + outputs = self.prophetnet.decoder( + input_ids=input_ids, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + head_mask=head_mask, + cross_attn_head_mask=cross_attn_head_mask, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + batch_size, sequence_length = input_ids.shape if input_ids is not None else inputs_embeds.shape[:2] + + predicting_streams = outputs[1].view(batch_size, self.config.ngram, sequence_length, -1) + predict_logits = self.lm_head(predicting_streams) + + logits = predict_logits[:, 0] + logits_ngram = predict_logits[:, 1:] if self.config.ngram > 1 else None + + loss = None + if labels is not None: + loss = self._compute_loss(predict_logits, labels) + + if not return_dict: + all_logits = tuple(v for v in [logits, logits_ngram] if v is not None) + return (loss,) + all_logits + outputs[2:] if loss is not None else all_logits + outputs[2:] + else: + return XLMProphetNetDecoderLMOutput( + loss=loss, + logits=logits, + logits_ngram=logits_ngram, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + hidden_states_ngram=outputs.hidden_states_ngram, + attentions=outputs.attentions, + ngram_attentions=outputs.ngram_attentions, + cross_attentions=outputs.cross_attentions, + ) + + def _compute_loss(self, logits, labels, ignore_index=-100): + expend_targets = labels.new_zeros(self.config.ngram, labels.size(0), labels.size(1)).fill_(ignore_index) + + for i in range(self.config.ngram): + if i > 0 and self.disable_ngram_loss: + break + expend_targets[i, :, :] = labels + + logits = logits.transpose(0, 1).contiguous() + lprobs = nn.functional.log_softmax( + logits.view(-1, logits.size(-1)), + dim=-1, + dtype=torch.float32, + ) + + loss = nn.functional.nll_loss(lprobs, expend_targets.view(-1), reduction="mean") + + if self.config.eps > 0.0: + smooth_loss = -lprobs.sum(dim=-1, keepdim=True) + non_masked_tokens = expend_targets.ne(ignore_index).view(-1) + smooth_loss = smooth_loss[non_masked_tokens] + smooth_loss = smooth_loss.mean() + + eps_i = self.config.eps / lprobs.size(-1) + loss = (1.0 - self.config.eps) * loss + eps_i * smooth_loss + + return loss + + def prepare_inputs_for_generation( + self, + input_ids, + past_key_values=None, + attention_mask=None, + head_mask=None, + use_cache=None, + **kwargs, + ): + # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly + if attention_mask is None: + attention_mask = input_ids.new_ones(input_ids.shape) + + if past_key_values: + input_ids = input_ids[:, -1:] + # first step, decoder_cached_states are empty + return { + 
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed + "attention_mask": attention_mask, + "head_mask": head_mask, + "past_key_values": past_key_values, + "use_cache": use_cache, + } + + @staticmethod + # Copied from transformers.models.bart.modeling_bart.BartForCausalLM._reorder_cache + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past + + +# Copied from transformers.models.prophetnet.modeling_prophetnet.ProphetNetDecoderWrapper with ProphetNet->XLMProphetNet, prophetnet->XLMProphetNet +class XLMProphetNetDecoderWrapper(XLMProphetNetPreTrainedModel): + """ + This is a wrapper class, so that [`XLMProphetNetForCausalLM`] can correctly be loaded from pretrained XLMProphetNet + classes. + """ + + def __init__(self, config: XLMProphetNetConfig): + super().__init__(config) + + self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) + self.decoder = XLMProphetNetDecoder(config, word_embeddings=self.word_embeddings) + + # Initialize weights and apply final processing + self.post_init() + + def _tie_weights(self): + self._tie_or_clone_weights(self.word_embeddings, self.decoder.get_input_embeddings()) + + def forward(self, *args, **kwargs): + return self.decoder(*args, **kwargs) diff --git a/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py new file mode 100644 index 0000000000000000000000000000000000000000..fa65fa5cbfbaf2fccd6fcad94bf68a3ef9ed22d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/xlm_prophetnet/tokenization_xlm_prophetnet.py @@ -0,0 +1,323 @@ +# coding=utf-8 +# Copyright 2020 The Microsoft Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import collections +import os +from shutil import copyfile +from typing import Any, Dict, List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer +from ...utils import logging + + +logger = logging.get_logger(__name__) + +SPIECE_UNDERLINE = "▁" + +VOCAB_FILES_NAMES = {"vocab_file": "prophetnet.tokenizer"} + + +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +class XLMProphetNetTokenizer(PreTrainedTokenizer): + """ + Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on + [SentencePiece](https://github.com/google/sentencepiece). + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. 
Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + Path to the vocabulary file. + bos_token (`str`, *optional*, defaults to `"[SEP]"`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + + + + When building a sequence using special tokens, this is not the token that is used for the beginning of + sequence. The token used is the `cls_token`. + + + + eos_token (`str`, *optional*, defaults to `"[SEP]"`): + The end of sequence token. + + + + When building a sequence using special tokens, this is not the token that is used for the end of sequence. + The token used is the `sep_token`. + + + + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + sp_model_kwargs (`dict`, *optional*): + Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for + SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, + to set: + + - `enable_sampling`: Enable subword regularization. + - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. + + - `nbest_size = {0,1}`: No sampling is performed. + - `nbest_size > 1`: samples from the nbest_size results. + - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) + using forward-filtering-and-backward-sampling algorithm. + + - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for + BPE-dropout. + + Attributes: + sp_model (`SentencePieceProcessor`): + The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). 
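    A short usage sketch (illustrative only; the vocabulary path is a placeholder for a local SentencePiece model
    file):

    ```python
    >>> from transformers import XLMProphetNetTokenizer

    >>> tokenizer = XLMProphetNetTokenizer(vocab_file="prophetnet.tokenizer")  # placeholder path
    >>> ids = tokenizer("Hello, my dog is cute").input_ids  # a single sequence is encoded as `X [SEP]`
    ```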
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + model_input_names = ["input_ids", "attention_mask"] + + def __init__( + self, + vocab_file, + bos_token="[SEP]", + eos_token="[SEP]", + sep_token="[SEP]", + unk_token="[UNK]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + sp_model_kwargs: Optional[Dict[str, Any]] = None, + **kwargs, + ) -> None: + self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs + + try: + import sentencepiece as spm + except ImportError: + logger.warning( + "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" + " pip install sentencepiece" + ) + raise + + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(str(vocab_file)) + self.vocab_file = vocab_file + + # Original fairseq vocab and spm vocab must be "aligned": + # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 + # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- + # fairseq | '' | '' | '' | '' | ',' | '.' | '▁' | 's' | '▁de' | '-' + # spm | '' | '' | '' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' + + # put special tokens and [unused] tokens into the vocab + self.fairseq_tokens_to_ids = {"[PAD]": 0, "[CLS]": 1, "[SEP]": 2, "[UNK]": 3, "[MASK]": 4} + + for i in range(10): + tok = f"[unused{i}]" + self.fairseq_tokens_to_ids[tok] = 5 + i + + # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab + self.fairseq_offset = 12 + self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} + + # TODO ArthurZ fairseq_ids_to_tokens should be removed + + super().__init__( + bos_token=bos_token, + eos_token=eos_token, + sep_token=sep_token, + unk_token=unk_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + sp_model_kwargs=self.sp_model_kwargs, + **kwargs, + ) + + @property + def can_save_slow_tokenizer(self) -> bool: + return os.path.isfile(self.vocab_file) if self.vocab_file else False + + def __getstate__(self): + state = self.__dict__.copy() + state["sp_model"] = None + return state + + def __setstate__(self, d): + self.__dict__ = d + try: + import sentencepiece as spm + except ImportError: + logger.warning( + "You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece" + " pip install sentencepiece" + ) + raise + + # for backward compatibility + if not hasattr(self, "sp_model_kwargs"): + self.sp_model_kwargs = {} + + self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) + self.sp_model.Load(self.vocab_file) + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
+ """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is None: + return ([0] * len(token_ids_0)) + [1] + return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLMProphetNet + does not make use of token type ids, therefore a list of zeros is returned. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of zeros. + + """ + + sep = [self.sep_token_id] + + if token_ids_1 is None: + return len(token_ids_0 + sep) * [0] + return len(token_ids_0 + sep + sep + token_ids_1 + sep) * [0] + + @property + def vocab_size(self): + return len(self.sp_model) + self.fairseq_offset + + def get_vocab(self): + vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} + vocab.update(self.added_tokens_encoder) + return vocab + + def _tokenize(self, text: str) -> str: + return self.sp_model.encode(text, out_type=str) + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + if token in self.fairseq_tokens_to_ids: + return self.fairseq_tokens_to_ids[token] + spm_id = self.sp_model.PieceToId(token) + + # Need to return unknown token if the SP model returned 0 + return spm_id + self.fairseq_offset if spm_id else self.unk_token_id + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + if index in self.fairseq_ids_to_tokens: + return self.fairseq_ids_to_tokens[index] + return self.sp_model.IdToPiece(index - self.fairseq_offset) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (strings for sub-words) in a single string.""" + out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() + return out_string + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + if not os.path.isdir(save_directory): + logger.error(f"Vocabulary path ({save_directory}) should be a directory") + return + out_vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + + if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): + copyfile(self.vocab_file, out_vocab_file) + elif not os.path.isfile(self.vocab_file): + with open(out_vocab_file, "wb") as fi: + content_spiece_model = self.sp_model.serialized_model_proto() + fi.write(content_spiece_model) + + return (out_vocab_file,) + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A XLMProphetNet sequence has the following format: + + - single sequence: `X [SEP]` + - pair of sequences: `A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. 
+ + Returns: + `List[int]`: list of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + + if token_ids_1 is None: + return token_ids_0 + [self.sep_token_id] + sep = [self.sep_token_id] + return token_ids_0 + sep + token_ids_1 + sep
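
A minimal usage sketch of the special-token layout implemented above (not part of the diff itself). It assumes `sentencepiece` is installed and that the public `microsoft/xprophetnet-large-wiki100-cased` checkpoint is reachable; any local `prophetnet.tokenizer` SentencePiece file loaded via `vocab_file` should behave the same way.

from transformers import XLMProphetNetTokenizer

# Assumed checkpoint for illustration; substitute your own vocab file if needed.
tokenizer = XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

ids_a = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("Hello world"))
ids_b = tokenizer.convert_tokens_to_ids(tokenizer.tokenize("How are you?"))

# Single sequence: X [SEP]
assert tokenizer.build_inputs_with_special_tokens(ids_a) == ids_a + [tokenizer.sep_token_id]

# Pair of sequences: A [SEP] B [SEP]
pair = tokenizer.build_inputs_with_special_tokens(ids_a, ids_b)
assert pair == ids_a + [tokenizer.sep_token_id] + ids_b + [tokenizer.sep_token_id]

# Only the appended [SEP] tokens are flagged as special (1); everything else is 0.
assert tokenizer.get_special_tokens_mask(ids_a, ids_b) == [0] * len(ids_a) + [1] + [0] * len(ids_b) + [1]

# XLMProphetNet does not use token type ids, so the mask is all zeros.
assert set(tokenizer.create_token_type_ids_from_sequences(ids_a, ids_b)) == {0}

# SentencePiece ids are shifted by fairseq_offset (12); per the comment in
# __init__, "," (spm id 3) should therefore map to embedding id 15.
print(tokenizer.convert_tokens_to_ids(","))

The offset exists because ids 0-14 are reserved for the five control tokens ([PAD], [CLS], [SEP], [UNK], [MASK]) plus the ten [unusedN] placeholders, while the first three SentencePiece pieces are shadowed by them, which is why `vocab_size` is `len(self.sp_model) + self.fairseq_offset`.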