diff --git a/.gitattributes b/.gitattributes index df58a4a76c3faf9d977b58a8ceea4f3dade9e9c1..ec7fd0219411df04dfe06e981689c8ec0a7f1aa5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -151,3 +151,6 @@ venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm filter=lf venv/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/ckpts/universal/global_step20/mp_rank_03_model_states.pt b/ckpts/universal/global_step20/mp_rank_03_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..92320fb6aba9f17012158bd4d081ee25850cb114 --- /dev/null +++ b/ckpts/universal/global_step20/mp_rank_03_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f922f6f6c964c508b570592253a50cc3d268d45fda0d3873521cc568562d82e +size 4230020 diff --git a/ckpts/universal/global_step20/mp_rank_04_model_states.pt b/ckpts/universal/global_step20/mp_rank_04_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..cfbcc52d90cc30770205eaedddcac018c2634865 --- /dev/null +++ b/ckpts/universal/global_step20/mp_rank_04_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ab24047ab783a2e40cbd1e7ae7898b4c5b94d1078cb71d552de5cbef470cb31 +size 4230084 diff --git a/ckpts/universal/global_step20/mp_rank_05_model_states.pt b/ckpts/universal/global_step20/mp_rank_05_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..449a18cf8e2e9919b0ccccde83cfa0d19eaa6f5f --- /dev/null +++ b/ckpts/universal/global_step20/mp_rank_05_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1fcf2f7f884d10105f67af48d78acd027923f4a35c988139deb4d8a10141b73 +size 4230084 diff --git a/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/fp32.pt b/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/fp32.pt new file mode 100644 index 0000000000000000000000000000000000000000..1f257bbbcaaecb1b155ed496d4a9727e0e577fbf --- /dev/null +++ b/ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/fp32.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42a9faf49c7474b63052bd0c696414279f710add6782b29f724602caf04492c7 +size 33555533 diff --git a/lm-evaluation-harness/tests/testdata/anagrams2-v0-greedy_until b/lm-evaluation-harness/tests/testdata/anagrams2-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..9db9d158dc07c46ddb5bc88ea797cc41080ca941 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/anagrams2-v0-greedy_until @@ -0,0 +1 @@ +6700a3c44e48abe8337238dcbe3b54cf4abafe0c204c52d921e590872fbd05e7 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-res.json b/lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..f0186c4c4b395b6f57e26120975ec0378cd9c0ea --- /dev/null 
+++ b/lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-res.json @@ -0,0 +1 @@ +{"results": {"arc_challenge": {"acc": 0.26621160409556316, "acc_norm": 0.28242320819112626, "acc_norm_stderr": 0.01315545688409722, "acc_stderr": 0.01291577478152323}}, "versions": {"arc_challenge": "2.0"}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..7b7adaf86251b258f270478b8310660d56a15f4a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-loglikelihood @@ -0,0 +1 @@ +14ac5e510cdf82967d6827a9ca059906ee1db2e347be1b17f36403a157e73552 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..28f32c92c67df30eb1548fd27939b45b484a4cbc --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-loglikelihood @@ -0,0 +1 @@ +66f7ff3b40251ee38fadcbee658e309a200224356fc3efa07d0a490a2c24bfa3 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..1bbb3eb0c26b177cb739f58d8098b339278fcd84 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json @@ -0,0 +1 @@ +{"results": {"arithmetic_3da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_3da": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..1c39ab70454e6589ea0c506e3e98bbd5a21449bf --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_anaphor_gender_agreement": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_anaphor_gender_agreement": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..68bbe21379d0d6326ce5cc07b0a2bc1589ed73df --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_anaphor_number_agreement": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_anaphor_number_agreement": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..3a6d0875c6aadaf550692275a3ecd0b3ac099d3c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-loglikelihood @@ -0,0 +1 @@ +f46cfcc7e43050a235fd2a6b989cabbfbcce76786df74db9f0d4a9cd1caa1628 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json 
b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..defc3560d98de3c640d8e7f41e5bf9bf95d34aa4 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_determiner_noun_agreement_with_adj_irregular_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adj_irregular_1": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..d5b2f91179f553c61c519f50380d6f36fcb6240d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_intransitive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_intransitive": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..1bda1a2aa9c1eeee68b3ca88f2de38cbb8e5d67b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_principle_A_domain_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..44ea10c1380c3dccdbc8d2ad6a2d84e716e81773 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_tough_vs_raising_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_tough_vs_raising_1": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..0a58b730c1e43271ba9d287c6b645ab97d10a560 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-loglikelihood @@ -0,0 +1 @@ +e754a309296b157677dfba6e6feef983d1ce38dd0169ae726265621a7b573163 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..9cc4d2bb8012080bb2030e494eebd97e945b203c --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-loglikelihood @@ -0,0 +1 @@ +fa1e5fc7492a66c9a90765e605003c38408347617db5ecf36706f1d374af5d42 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..167b5e3ba055d1d67ca70e4f9cd3879f6b40b179 --- /dev/null 
+++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood @@ -0,0 +1 @@ +ea61eaad64e9292790d4bbef955ffeebed7a595de098bc5ac726a6e51f27f9af \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/drop-v1-res.json b/lm-evaluation-harness/tests/testdata/drop-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..8f397b410df7a77e25fd2916787b6050a5806d1d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/drop-v1-res.json @@ -0,0 +1 @@ +{"results": {"drop": {"em": 0.0, "em_stderr": 0.0, "f1": 0.0, "f1_stderr": 0.0}}, "versions": {"drop": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..7f665ef4a1bd06ecfd30d999ae6880c00ba849cf --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood @@ -0,0 +1 @@ +c29e4e67ff91af29b9434884874414d1b1b32ccc32903c6b1639469b19907419 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..1388bcdcd9a7283decbf3680283a2cc4cfc7cfde --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-conceptual_physics": {"acc": 0.2680851063829787, "acc_norm": 0.2553191489361702, "acc_norm_stderr": 0.028504856470514185, "acc_stderr": 0.028957342788342347}}, "versions": {"hendrycksTest-conceptual_physics": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..0fb76aa9ba10e63a931ec3707ac6d9f3d84292aa --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-high_school_geography": {"acc": 0.2474747474747475, "acc_norm": 0.2777777777777778, "acc_norm_stderr": 0.03191178226713547, "acc_stderr": 0.03074630074212452}}, "versions": {"hendrycksTest-high_school_geography": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..228dfe072cd02f94bced495f271c5cc108850719 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood @@ -0,0 +1 @@ +1c8b994bd9a63ec874fc8d0e3a27077118b7adc472306b2fd6c55635a78b9d52 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..a5807b5831206a8cb29b2e99c02c7c6025dd6f25 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood @@ -0,0 +1 @@ +2e9449dd803f9e2334dc562d9f04031fd013ed36b883b44ab500533a5dbbface \ No 
newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..4c01847ef594713fee284436be6fe8d20d602554 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood @@ -0,0 +1 @@ +6983c560a562749f4f702249a3a6ae51fa495acc0643a980bf2cf52c6c5d4b95 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..12e7f527bde7683ea74111603618c1e99cdd93a6 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-res.json @@ -0,0 +1 @@ +{"results": {"lambada_openai": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai": "2.0"}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..b599a89f7af0c28e795e5c5dfc1961f34acde2fc --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-loglikelihood @@ -0,0 +1 @@ +7655e748b63ae7e9911411d2d2a2577221d6c861ca4448509992541294d689f3 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-res.json b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..f7fdfc9c2d5c6d5d4abb7d6e932454615c095ea1 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-res.json @@ -0,0 +1 @@ +{"results": {"lambada_openai_mt_en": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_en": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_algebra-v1-res.json b/lm-evaluation-harness/tests/testdata/math_algebra-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..10d18c2f864117ae56fe56ba1191f6cde4bec7b3 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_algebra-v1-res.json @@ -0,0 +1 @@ +{"results": {"math_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_algebra": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-greedy_until b/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..3ab10de26a038019a18699e20887de6da66981c4 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-greedy_until @@ -0,0 +1 @@ +d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_enron-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_enron-v1-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..57dbe764605ef5e1e4578682549a001c851704c0 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_enron-v1-loglikelihood_rolling @@ -0,0 +1 @@ +4baa6ccdc9e3aa9921675ab4400d5e89d7b546b844a8ea28f6461d649066418a \ No newline at end of file diff 
--git a/lm-evaluation-harness/tests/testdata/pile_github-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_github-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..bdabf399695d155026643eacca7954c5f87009d5 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_github-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_github": {"bits_per_byte": 9.540627613754646e-05, "byte_perplexity": 1.0000954108274611, "word_perplexity": 1.0009643183931227}}, "versions": {"pile_github": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..333c2970fa21b0bd53b77bdc3880acad0c8d6459 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_pubmed-abstracts": {"bits_per_byte": 0.00037553733051528816, "byte_perplexity": 1.0003756078534862, "word_perplexity": 1.0025884332779}}, "versions": {"pile_pubmed-abstracts": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_uspto-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_uspto-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..599ae44ef430af958ab53c57d0b7900928ad243a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_uspto-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_uspto": {"bits_per_byte": 0.000174024142670342, "byte_perplexity": 1.00012063161925, "word_perplexity": 1.0007716198916954}}, "versions": {"pile_uspto": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b58ce148f0071707d5da39135aaeb92a2a1457a2 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_youtubesubtitles": {"bits_per_byte": 2.3447170928931888e-05, "byte_perplexity": 1.000023447445816, "word_perplexity": 1.0001529192262875}}, "versions": {"pile_youtubesubtitles": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..fcf2faa8bc7927212fa7c55940849f64d3c48968 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_youtubesubtitles": {"bits_per_byte": 3.3827117222045906e-05, "byte_perplexity": 1.000023447445816, "word_perplexity": 1.0001529192262875}}, "versions": {"pile_youtubesubtitles": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/sciq-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/sciq-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..25ce988773df6dd27009a4ac47357dfb7d70748e --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/sciq-v0-loglikelihood @@ -0,0 +1 @@ +71cbb6e2a7ac4512c3761ea801d420eb3fac49d158c7e4deaa3ab8727bea923c \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/squad2-v1-greedy_until b/lm-evaluation-harness/tests/testdata/squad2-v1-greedy_until new file mode 100644 index 
0000000000000000000000000000000000000000..70df2fd6ae1f59de5b6f3f6712bc2331197400c8 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/squad2-v1-greedy_until @@ -0,0 +1 @@ +e17e3d85c1d5adaf2d6b4b752c4babc2e0b3a6e144e6de70cb3b2287e85109b8 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/sst-v0-res.json b/lm-evaluation-harness/tests/testdata/sst-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..5fe3c62a205cdd7a57acaf082f671e9ba864e5f7 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/sst-v0-res.json @@ -0,0 +1 @@ +{"results": {"sst": {"acc": 0.5172018348623854, "acc_stderr": 0.016931824425903734}}, "versions": {"sst": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/swag-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/swag-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..c8152027dc2f15319676bc32d55f32ca0bec00b5 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/swag-v0-loglikelihood @@ -0,0 +1 @@ +be4fcbad876124c4ba3c71970538a97fec0e36a9cc677c70b6c9243a7bcee0ec \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/triviaqa-v0-res.json b/lm-evaluation-harness/tests/testdata/triviaqa-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..ab98847da6985f5c9d1e650008367ba739a1147f --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/triviaqa-v0-res.json @@ -0,0 +1 @@ +{"results": {"triviaqa": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"triviaqa": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wic-v0-res.json b/lm-evaluation-harness/tests/testdata/wic-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..eadc573ed3d6a9b8b9bd924896ef5d719a53d5d1 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wic-v0-res.json @@ -0,0 +1 @@ +{"results": {"wic": {"acc": 0.49216300940438873, "acc_stderr": 0.01980828765781383}}, "versions": {"wic": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..75f1072b6e7f2bdd9ecd98987c86fefd3375fb6d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-greedy_until @@ -0,0 +1 @@ +d30e23e38d9a45b9c31e1dfd14b58d0b7020df4b9c8a1c697aa6bc5fba8ce08a \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..826e0382abb32da67ac0dd0b271527b40fd3b6ae --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt16-de-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.013700416764482968, "chrf_stderr": 0.00016071651360909355, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt16-de-en": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-greedy_until b/lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..7bcf240b7090e406259d4bfc090d1eb22ec6e291 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-greedy_until @@ -0,0 +1 @@ +bfead9efdb1b2402a414c55929c8d8f956585f938a35466931d44e81d89cfe00 \ No newline at end of file diff --git 
a/lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..183e66270a61dcb463076306b2768dde7995162f --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-en-de": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.007148103038872972, "chrf_stderr": 9.594096858911254e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-de": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..e5ee2e9be911cda88b6445715b833e1a0dbf92dd --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-en-km": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 1.9008351315007364e-05, "chrf_stderr": 7.136657625458525e-06, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-km": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..13bfd5b552b92b771266666dd5fe5b9496064051 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-en-pl": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.009006977773147825, "chrf_stderr": 0.00023387733367766675, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-pl": 0}} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..510de705645fb4f9971845f16d9cccb077375853 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9f843f6319551667349e0285280b415b7e1838274395c5397d12fd5c9f54161 +size 1331992 diff --git a/venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..83f750d35235e606449842db9ee4f484fa7344f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3bce8ab2cb4dd8ce9d849f9ab25a0a469043e16928545398d21a6afbb127a05d +size 4528352 diff --git a/venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 b/venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 new file mode 100644 index 0000000000000000000000000000000000000000..dcd455728785a273fdd6655b1a6bf3251a7fe7f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d235cdfb4c840988aa4aae7c4b8b8e21f1524ab6a3e4296afd854c7d33d3af96 +size 2753864 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz new file mode 100644 index 
0000000000000000000000000000000000000000..6bfec839e4d42402efb1845ad1ee9d23be951177 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/orc/TestOrcFile.emptyFile.jsn.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c4b8c05ddfb7b1cc770c2c9e02c9b14cedddbf5723f4a46f628a4a7b9ad03622 +size 50 diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/bark/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..03e5865ca4a483c76071e57e3a5b45fca82744a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bark/__init__.py @@ -0,0 +1,79 @@ +# Copyright 2023 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_torch_available, +) + + +_import_structure = { + "configuration_bark": [ + "BARK_PRETRAINED_CONFIG_ARCHIVE_MAP", + "BarkCoarseConfig", + "BarkConfig", + "BarkFineConfig", + "BarkSemanticConfig", + ], + "processing_bark": ["BarkProcessor"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_bark"] = [ + "BARK_PRETRAINED_MODEL_ARCHIVE_LIST", + "BarkFineModel", + "BarkSemanticModel", + "BarkCoarseModel", + "BarkModel", + "BarkPreTrainedModel", + "BarkCausalModel", + ] + +if TYPE_CHECKING: + from .configuration_bark import ( + BARK_PRETRAINED_CONFIG_ARCHIVE_MAP, + BarkCoarseConfig, + BarkConfig, + BarkFineConfig, + BarkSemanticConfig, + ) + from .processing_bark import BarkProcessor + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_bark import ( + BARK_PRETRAINED_MODEL_ARCHIVE_LIST, + BarkCausalModel, + BarkCoarseModel, + BarkFineModel, + BarkModel, + BarkPreTrainedModel, + BarkSemanticModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43091490c5ae77ece9595386faa66b71a73ef6cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/configuration_bark.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/configuration_bark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61a81a96ad6f2591d495f4804042551f3ae8f52a Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/configuration_bark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/convert_suno_to_hf.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/convert_suno_to_hf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01f7751d8ff929bceb42cf5ae06076e06bd2995e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/convert_suno_to_hf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/generation_configuration_bark.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/generation_configuration_bark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c332137e3e2376ff27109391a03802c13decb10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/generation_configuration_bark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/modeling_bark.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/modeling_bark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c77f66ffc234e491979b340a0748bcd30da9aa1f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/modeling_bark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/processing_bark.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/processing_bark.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92fa541278ae136d9d8c4701306f367a2d481dce Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/bark/__pycache__/processing_bark.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/configuration_bark.py b/venv/lib/python3.10/site-packages/transformers/models/bark/configuration_bark.py new file mode 100644 index 0000000000000000000000000000000000000000..a6bf2b546af1fd8a1ca5d503c39979be94580ede --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bark/configuration_bark.py @@ -0,0 +1,325 @@ +# coding=utf-8 +# Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BARK model configuration""" + +import os +from typing import Dict, Optional, Union + +from ...configuration_utils import PretrainedConfig +from ...utils import add_start_docstrings, logging +from ..auto import CONFIG_MAPPING + + +logger = logging.get_logger(__name__) + + +BARK_SUBMODELCONFIG_START_DOCSTRING = """ + This is the configuration class to store the configuration of a [`{model}`]. 
It is used to instantiate the model + according to the specified arguments, defining the model architecture. Instantiating a configuration with the + defaults will yield a similar configuration to that of the Bark [suno/bark](https://huggingface.co/suno/bark) + architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + block_size (`int`, *optional*, defaults to 1024): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + input_vocab_size (`int`, *optional*, defaults to 10_048): + Vocabulary size of a Bark sub-model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`{model}`]. Defaults to 10_048 but should be carefully thought with + regards to the chosen sub-model. + output_vocab_size (`int`, *optional*, defaults to 10_048): + Output vocabulary size of a Bark sub-model. Defines the number of different tokens that can be represented + by the: `output_ids` when passing forward a [`{model}`]. Defaults to 10_048 but should be carefully thought + with regards to the chosen sub-model. + num_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the given sub-model. + num_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer architecture. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the "intermediate" (often named feed-forward) layer in the architecture. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + bias (`bool`, *optional*, defaults to `True`): + Whether or not to use bias in the linear layers and layer norm layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). +""" + + +class BarkSubModelConfig(PretrainedConfig): + model_type = "bark_module" + keys_to_ignore_at_inference = ["past_key_values"] + + attribute_map = { + "num_attention_heads": "num_heads", + "num_hidden_layers": "num_layers", + "vocab_size": "input_vocab_size", + "window_size": "block_size", + } + + def __init__( + self, + block_size=1024, + input_vocab_size=10_048, + output_vocab_size=10_048, + num_layers=12, + num_heads=12, + hidden_size=768, + dropout=0.0, + bias=True, # True: bias in Linears and LayerNorms, like GPT-2. 
False: a bit better and faster + initializer_range=0.02, + use_cache=True, + **kwargs, + ): + self.block_size = block_size + self.input_vocab_size = input_vocab_size + self.output_vocab_size = output_vocab_size + self.num_layers = num_layers + self.num_heads = num_heads + self.hidden_size = hidden_size + self.dropout = dropout + self.bias = bias + self.use_cache = use_cache + self.initializer_range = initializer_range + + super().__init__(**kwargs) + + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Union[str, os.PathLike], + cache_dir: Optional[Union[str, os.PathLike]] = None, + force_download: bool = False, + local_files_only: bool = False, + token: Optional[Union[str, bool]] = None, + revision: str = "main", + **kwargs, + ) -> "PretrainedConfig": + kwargs["cache_dir"] = cache_dir + kwargs["force_download"] = force_download + kwargs["local_files_only"] = local_files_only + kwargs["revision"] = revision + + cls._set_token_in_kwargs(kwargs, token) + + config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) + + # get the config dict if we are loading from Bark + if config_dict.get("model_type") == "bark": + config_dict = config_dict[f"{cls.model_type}_config"] + + if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type: + logger.warning( + f"You are using a model of type {config_dict['model_type']} to instantiate a model of type " + f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." + ) + + return cls.from_dict(config_dict, **kwargs) + + +@add_start_docstrings( + BARK_SUBMODELCONFIG_START_DOCSTRING.format(config="BarkSemanticConfig", model="BarkSemanticModel"), + """ + Example: + + ```python + >>> from transformers import BarkSemanticConfig, BarkSemanticModel + + >>> # Initializing a Bark sub-module style configuration + >>> configuration = BarkSemanticConfig() + + >>> # Initializing a model (with random weights) from the suno/bark style configuration + >>> model = BarkSemanticModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""", +) +class BarkSemanticConfig(BarkSubModelConfig): + model_type = "semantic" + + +@add_start_docstrings( + BARK_SUBMODELCONFIG_START_DOCSTRING.format(config="BarkCoarseConfig", model="BarkCoarseModel"), + """ + Example: + + ```python + >>> from transformers import BarkCoarseConfig, BarkCoarseModel + + >>> # Initializing a Bark sub-module style configuration + >>> configuration = BarkCoarseConfig() + + >>> # Initializing a model (with random weights) from the suno/bark style configuration + >>> model = BarkCoarseModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""", +) +class BarkCoarseConfig(BarkSubModelConfig): + model_type = "coarse_acoustics" + + +@add_start_docstrings( + BARK_SUBMODELCONFIG_START_DOCSTRING.format(config="BarkFineConfig", model="BarkFineModel"), + """ + n_codes_total (`int`, *optional*, defaults to 8): + The total number of audio codebooks predicted. Used in the fine acoustics sub-model. + n_codes_given (`int`, *optional*, defaults to 1): + The number of audio codebooks predicted in the coarse acoustics sub-model. Used in the acoustics + sub-models. 
+ Example: + + ```python + >>> from transformers import BarkFineConfig, BarkFineModel + + >>> # Initializing a Bark sub-module style configuration + >>> configuration = BarkFineConfig() + + >>> # Initializing a model (with random weights) from the suno/bark style configuration + >>> model = BarkFineModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""", +) +class BarkFineConfig(BarkSubModelConfig): + model_type = "fine_acoustics" + + def __init__(self, tie_word_embeddings=True, n_codes_total=8, n_codes_given=1, **kwargs): + self.n_codes_total = n_codes_total + self.n_codes_given = n_codes_given + + super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs) + + +class BarkConfig(PretrainedConfig): + """ + This is the configuration class to store the configuration of a [`BarkModel`]. It is used to instantiate a Bark + model according to the specified sub-models configurations, defining the model architecture. + + Instantiating a configuration with the defaults will yield a similar configuration to that of the Bark + [suno/bark](https://huggingface.co/suno/bark) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + Args: + semantic_config ([`BarkSemanticConfig`], *optional*): + Configuration of the underlying semantic sub-model. + coarse_acoustics_config ([`BarkCoarseConfig`], *optional*): + Configuration of the underlying coarse acoustics sub-model. + fine_acoustics_config ([`BarkFineConfig`], *optional*): + Configuration of the underlying fine acoustics sub-model. + codec_config ([`AutoConfig`], *optional*): + Configuration of the underlying codec sub-model. + + Example: + + ```python + >>> from transformers import ( + ... BarkSemanticConfig, + ... BarkCoarseConfig, + ... BarkFineConfig, + ... BarkModel, + ... BarkConfig, + ... AutoConfig, + ... ) + + >>> # Initializing Bark sub-modules configurations. + >>> semantic_config = BarkSemanticConfig() + >>> coarse_acoustics_config = BarkCoarseConfig() + >>> fine_acoustics_config = BarkFineConfig() + >>> codec_config = AutoConfig.from_pretrained("facebook/encodec_24khz") + + + >>> # Initializing a Bark module style configuration + >>> configuration = BarkConfig.from_sub_model_configs( + ... semantic_config, coarse_acoustics_config, fine_acoustics_config, codec_config + ... ) + + >>> # Initializing a model (with random weights) + >>> model = BarkModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ``` + """ + + model_type = "bark" + + def __init__( + self, + semantic_config: Dict = None, + coarse_acoustics_config: Dict = None, + fine_acoustics_config: Dict = None, + codec_config: Dict = None, + initializer_range=0.02, + **kwargs, + ): + if semantic_config is None: + semantic_config = {} + logger.info("semantic_config is None. initializing the semantic model with default values.") + + if coarse_acoustics_config is None: + coarse_acoustics_config = {} + logger.info("coarse_acoustics_config is None. initializing the coarse model with default values.") + + if fine_acoustics_config is None: + fine_acoustics_config = {} + logger.info("fine_acoustics_config is None. initializing the fine model with default values.") + + if codec_config is None: + codec_config = {} + logger.info("codec_config is None. 
initializing the codec model with default values.") + + self.semantic_config = BarkSemanticConfig(**semantic_config) + self.coarse_acoustics_config = BarkCoarseConfig(**coarse_acoustics_config) + self.fine_acoustics_config = BarkFineConfig(**fine_acoustics_config) + codec_model_type = codec_config["model_type"] if "model_type" in codec_config else "encodec" + self.codec_config = CONFIG_MAPPING[codec_model_type](**codec_config) + + self.initializer_range = initializer_range + + super().__init__(**kwargs) + + @classmethod + def from_sub_model_configs( + cls, + semantic_config: BarkSemanticConfig, + coarse_acoustics_config: BarkCoarseConfig, + fine_acoustics_config: BarkFineConfig, + codec_config: PretrainedConfig, + **kwargs, + ): + r""" + Instantiate a [`BarkConfig`] (or a derived class) from bark sub-models configuration. + + Returns: + [`BarkConfig`]: An instance of a configuration object + """ + return cls( + semantic_config=semantic_config.to_dict(), + coarse_acoustics_config=coarse_acoustics_config.to_dict(), + fine_acoustics_config=fine_acoustics_config.to_dict(), + codec_config=codec_config.to_dict(), + **kwargs, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/convert_suno_to_hf.py b/venv/lib/python3.10/site-packages/transformers/models/bark/convert_suno_to_hf.py new file mode 100644 index 0000000000000000000000000000000000000000..4720a70d5cd2adf5fe2fb67f4e8eeece706a8e27 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bark/convert_suno_to_hf.py @@ -0,0 +1,262 @@ +"""Convert Bark checkpoint.""" +import argparse +import os +from pathlib import Path + +import torch +from bark.generation import _load_model as _bark_load_model +from huggingface_hub import hf_hub_download + +from transformers import EncodecConfig, EncodecModel, set_seed +from transformers.models.bark.configuration_bark import ( + BarkCoarseConfig, + BarkConfig, + BarkFineConfig, + BarkSemanticConfig, +) +from transformers.models.bark.generation_configuration_bark import ( + BarkCoarseGenerationConfig, + BarkFineGenerationConfig, + BarkGenerationConfig, + BarkSemanticGenerationConfig, +) +from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel +from transformers.utils import logging + + +logging.set_verbosity_info() +logger = logging.get_logger(__name__) + +set_seed(770) + + +new_layer_name_dict = { + "c_attn": "att_proj", + "c_proj": "out_proj", + "c_fc": "in_proj", + "transformer.": "", + "h.": "layers.", + "ln_1": "layernorm_1", + "ln_2": "layernorm_2", + "ln_f": "layernorm_final", + "wpe": "position_embeds_layer", + "wte": "input_embeds_layer", +} + + +REMOTE_MODEL_PATHS = { + "text_small": { + "repo_id": "suno/bark", + "file_name": "text.pt", + }, + "coarse_small": { + "repo_id": "suno/bark", + "file_name": "coarse.pt", + }, + "fine_small": { + "repo_id": "suno/bark", + "file_name": "fine.pt", + }, + "text": { + "repo_id": "suno/bark", + "file_name": "text_2.pt", + }, + "coarse": { + "repo_id": "suno/bark", + "file_name": "coarse_2.pt", + }, + "fine": { + "repo_id": "suno/bark", + "file_name": "fine_2.pt", + }, +} + +CUR_PATH = os.path.dirname(os.path.abspath(__file__)) +default_cache_dir = os.path.join(os.path.expanduser("~"), ".cache") +CACHE_DIR = os.path.join(os.getenv("XDG_CACHE_HOME", default_cache_dir), "suno", "bark_v0") + + +def _get_ckpt_path(model_type, use_small=False): + key = model_type + if use_small: + key += "_small" + return os.path.join(CACHE_DIR, REMOTE_MODEL_PATHS[key]["file_name"]) + + 
+def _download(from_hf_path, file_name): + os.makedirs(CACHE_DIR, exist_ok=True) + hf_hub_download(repo_id=from_hf_path, filename=file_name, local_dir=CACHE_DIR) + + +def _load_model(ckpt_path, device, use_small=False, model_type="text"): + if model_type == "text": + ModelClass = BarkSemanticModel + ConfigClass = BarkSemanticConfig + GenerationConfigClass = BarkSemanticGenerationConfig + elif model_type == "coarse": + ModelClass = BarkCoarseModel + ConfigClass = BarkCoarseConfig + GenerationConfigClass = BarkCoarseGenerationConfig + elif model_type == "fine": + ModelClass = BarkFineModel + ConfigClass = BarkFineConfig + GenerationConfigClass = BarkFineGenerationConfig + else: + raise NotImplementedError() + model_key = f"{model_type}_small" if use_small else model_type + model_info = REMOTE_MODEL_PATHS[model_key] + if not os.path.exists(ckpt_path): + logger.info(f"{model_type} model not found, downloading into `{CACHE_DIR}`.") + _download(model_info["repo_id"], model_info["file_name"]) + checkpoint = torch.load(ckpt_path, map_location=device) + # this is a hack + model_args = checkpoint["model_args"] + if "input_vocab_size" not in model_args: + model_args["input_vocab_size"] = model_args["vocab_size"] + model_args["output_vocab_size"] = model_args["vocab_size"] + del model_args["vocab_size"] + + # convert Bark model arguments to HF Bark model arguments + model_args["num_heads"] = model_args.pop("n_head") + model_args["hidden_size"] = model_args.pop("n_embd") + model_args["num_layers"] = model_args.pop("n_layer") + + model_config = ConfigClass(**checkpoint["model_args"]) + model = ModelClass(config=model_config) + model_generation_config = GenerationConfigClass() + + model.generation_config = model_generation_config + state_dict = checkpoint["model"] + # fixup checkpoint + unwanted_prefix = "_orig_mod." 
+ for k, v in list(state_dict.items()): + if k.startswith(unwanted_prefix): + # replace part of the key with corresponding layer name in HF implementation + new_k = k[len(unwanted_prefix) :] + for old_layer_name in new_layer_name_dict: + new_k = new_k.replace(old_layer_name, new_layer_name_dict[old_layer_name]) + + state_dict[new_k] = state_dict.pop(k) + + extra_keys = set(state_dict.keys()) - set(model.state_dict().keys()) + extra_keys = {k for k in extra_keys if not k.endswith(".attn.bias")} + missing_keys = set(model.state_dict().keys()) - set(state_dict.keys()) + missing_keys = {k for k in missing_keys if not k.endswith(".attn.bias")} + if len(extra_keys) != 0: + raise ValueError(f"extra keys found: {extra_keys}") + if len(missing_keys) != 0: + raise ValueError(f"missing keys: {missing_keys}") + model.load_state_dict(state_dict, strict=False) + n_params = model.num_parameters(exclude_embeddings=True) + val_loss = checkpoint["best_val_loss"].item() + logger.info(f"model loaded: {round(n_params/1e6,1)}M params, {round(val_loss,3)} loss") + model.eval() + model.to(device) + del checkpoint, state_dict + + return model + + +def load_model(pytorch_dump_folder_path, use_small=False, model_type="text"): + if model_type not in ("text", "coarse", "fine"): + raise NotImplementedError() + + device = "cpu" # do conversion on cpu + + ckpt_path = _get_ckpt_path(model_type, use_small=use_small) + model = _load_model(ckpt_path, device, model_type=model_type, use_small=use_small) + + # load bark initial model + bark_model = _bark_load_model(ckpt_path, "cpu", model_type=model_type, use_small=use_small) + + if model_type == "text": + bark_model = bark_model["model"] + + if model.num_parameters(exclude_embeddings=True) != bark_model.get_num_params(): + raise ValueError("initial and new models don't have the same number of parameters") + + # check if same output as the bark model + batch_size = 5 + sequence_length = 10 + + if model_type in ["text", "coarse"]: + vec = torch.randint(256, (batch_size, sequence_length), dtype=torch.int) + output_old_model = bark_model(vec)[0] + + output_new_model_total = model(vec) + + # take last logits + output_new_model = output_new_model_total.logits[:, [-1], :] + + else: + prediction_codeboook_channel = 3 + n_codes_total = 8 + vec = torch.randint(256, (batch_size, sequence_length, n_codes_total), dtype=torch.int) + + output_new_model_total = model(prediction_codeboook_channel, vec) + output_old_model = bark_model(prediction_codeboook_channel, vec) + + output_new_model = output_new_model_total.logits + + # output difference should come from the difference of self-attention implementation design + if output_new_model.shape != output_old_model.shape: + raise ValueError("initial and new outputs don't have the same shape") + if (output_new_model - output_old_model).abs().max().item() > 1e-3: + raise ValueError("initial and new outputs are not equal") + + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + model.save_pretrained(pytorch_dump_folder_path) + + +def load_whole_bark_model( + semantic_path, + coarse_path, + fine_path, + append_text, + hub_path, + folder_path, +): + pytorch_dump_folder_path = os.path.join(folder_path, append_text) + + semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path, "config.json")) + coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path, "config.json")) + fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path, "config.json")) + codecConfig = 
EncodecConfig.from_pretrained("facebook/encodec_24khz") + + semantic = BarkSemanticModel.from_pretrained(semantic_path) + coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path) + fineAcoustic = BarkFineModel.from_pretrained(fine_path) + codec = EncodecModel.from_pretrained("facebook/encodec_24khz") + + bark_config = BarkConfig.from_sub_model_configs( + semanticConfig, coarseAcousticConfig, fineAcousticConfig, codecConfig + ) + + bark_generation_config = BarkGenerationConfig.from_sub_model_configs( + semantic.generation_config, coarseAcoustic.generation_config, fineAcoustic.generation_config + ) + + bark = BarkModel(bark_config) + + bark.semantic = semantic + bark.coarse_acoustics = coarseAcoustic + bark.fine_acoustics = fineAcoustic + bark.codec_model = codec + + bark.generation_config = bark_generation_config + + Path(pytorch_dump_folder_path).mkdir(exist_ok=True) + bark.save_pretrained(pytorch_dump_folder_path, repo_id=hub_path, push_to_hub=True) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + + parser.add_argument("model_type", type=str, help="text, coarse or fine.") + parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") + parser.add_argument("--is_small", action="store_true", help="convert the small version instead of the large.") + + args = parser.parse_args() + + load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small) diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/generation_configuration_bark.py b/venv/lib/python3.10/site-packages/transformers/models/bark/generation_configuration_bark.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7d98449d665f5d67067218609f4bfb814ea17d --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bark/generation_configuration_bark.py @@ -0,0 +1,331 @@ +# coding=utf-8 +# Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" BARK model generation configuration""" + +import copy +from typing import Dict + +from ...generation.configuration_utils import GenerationConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +class BarkSemanticGenerationConfig(GenerationConfig): + model_type = "semantic" + + def __init__( + self, + eos_token_id=10_000, + renormalize_logits=True, + max_new_tokens=768, + output_scores=False, + return_dict_in_generate=False, + output_hidden_states=False, + output_attentions=False, + temperature=1.0, + do_sample=False, + text_encoding_offset=10_048, + text_pad_token=129_595, + semantic_infer_token=129_599, + semantic_vocab_size=10_000, + max_input_semantic_length=256, + semantic_rate_hz=49.9, + min_eos_p=None, + **kwargs, + ): + """Class that holds a generation configuration for [`BarkSemanticModel`]. + + This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. 
Read the + documentation from [`GenerationConfig`] for more information. + + Args: + eos_token_id (`int`, *optional*, defaults to 10_000): + The id of the *end-of-sequence* token. + renormalize_logits (`bool`, *optional*, defaults to `True`): + Whether to renormalize the logits after applying all the logits processors or warpers (including the + custom ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the + score logits are normalized but some logit processors or warpers break the normalization. + max_new_tokens (`int`, *optional*, defaults to 768): + The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + temperature (`float`, *optional*, defaults to 1.0): + The value used to modulate the next token probabilities. + do_sample (`bool`, *optional*, defaults to `False`): + Whether or not to use sampling ; use greedy decoding otherwise. + text_encoding_offset (`int`, *optional*, defaults to 10_048): + Text encoding offset. + text_pad_token (`int`, *optional*, defaults to 129_595): + Text pad token. + semantic_infer_token (`int`, *optional*, defaults to 129_599): + Semantic infer token. + semantic_vocab_size (`int`, *optional*, defaults to 10_000): + Semantic vocab size. + max_input_semantic_length (`int`, *optional*, defaults to 256): + Max length of semantic input vector. + semantic_rate_hz (`float`, *optional*, defaults to 49.9): + Semantic rate in Hertz. + min_eos_p (`float`, *optional*): + Minimum threshold of the probability of the EOS token for it to be sampled. This is an early stopping + strategy to mitigate potential unwanted generations at the end of a prompt. The original implementation + suggests a default value of 0.2. 
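For orientation, the following is a minimal usage sketch (not taken from the diff itself) showing how these defaults combine when the config is instantiated directly; the sampling values chosen here are illustrative, with `min_eos_p=0.2` matching the value suggested above.

from transformers.models.bark.generation_configuration_bark import BarkSemanticGenerationConfig

# Illustrative sketch: enable sampling and the suggested early-stopping threshold.
semantic_cfg = BarkSemanticGenerationConfig(do_sample=True, temperature=0.7, min_eos_p=0.2)

# Defaults carried over from the signature above.
print(semantic_cfg.semantic_vocab_size)        # 10000
print(semantic_cfg.max_input_semantic_length)  # 256
print(semantic_cfg.semantic_pad_token)         # 10000 (reuses eos_token_id)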
+ """ + super().__init__( + temperature=temperature, + do_sample=do_sample, + eos_token_id=eos_token_id, + renormalize_logits=renormalize_logits, + max_new_tokens=max_new_tokens, + output_scores=output_scores, + return_dict_in_generate=return_dict_in_generate, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + **kwargs, + ) + + self.text_encoding_offset = text_encoding_offset + self.text_pad_token = text_pad_token + self.semantic_pad_token = eos_token_id + self.semantic_infer_token = semantic_infer_token + self.semantic_vocab_size = semantic_vocab_size + self.max_input_semantic_length = max_input_semantic_length + self.semantic_rate_hz = semantic_rate_hz + self.min_eos_p = min_eos_p + + +class BarkCoarseGenerationConfig(GenerationConfig): + model_type = "coarse_acoustics" + + def __init__( + self, + renormalize_logits=True, + output_scores=False, + return_dict_in_generate=False, + output_hidden_states=False, + output_attentions=False, + temperature=1.0, + do_sample=False, + coarse_semantic_pad_token=12_048, + coarse_rate_hz=75, + n_coarse_codebooks=2, + coarse_infer_token=12_050, + max_coarse_input_length=256, + max_coarse_history: int = 630, + sliding_window_len: int = 60, + **kwargs, + ): + """Class that holds a generation configuration for [`BarkCoarseModel`]. + + This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the + documentation from [`GenerationConfig`] for more information. + + Args: + renormalize_logits (`bool`, *optional*, defaults to `True`): + Whether to renormalize the logits after applying all the logits processors or warpers (including the + custom ones). It's highly recommended to set this flag to `True` as the search algorithms suppose the + score logits are normalized but some logit processors or warpers break the normalization. + output_scores (`bool`, *optional*, defaults to `False`): + Whether or not to return the prediction scores. See `scores` under returned tensors for more details. + return_dict_in_generate (`bool`, *optional*, defaults to `False`): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. + output_hidden_states (`bool`, *optional*, defaults to `False`): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors + for more details. + output_attentions (`bool`, *optional*, defaults to `False`): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more details. + temperature (`float`, *optional*, defaults to 1.0): + The value used to modulate the next token probabilities. + do_sample (`bool`, *optional*, defaults to `False`): + Whether or not to use sampling ; use greedy decoding otherwise. + coarse_semantic_pad_token (`int`, *optional*, defaults to 12_048): + Coarse semantic pad token. + coarse_rate_hz (`int`, *optional*, defaults to 75): + Coarse rate in Hertz. + n_coarse_codebooks (`int`, *optional*, defaults to 2): + Number of coarse codebooks. + coarse_infer_token (`int`, *optional*, defaults to 12_050): + Coarse infer token. + max_coarse_input_length (`int`, *optional*, defaults to 256): + Max length of input coarse vector. + max_coarse_history (`int`, *optional*, defaults to 630): + Max length of the output of the coarse acoustics model used in the fine generation step. + sliding_window_len (`int`, *optional*, defaults to 60): + The coarse generation step uses a sliding window to generate raw audio. 
+ """ + super().__init__( + temperature=temperature, + do_sample=do_sample, + renormalize_logits=renormalize_logits, + output_scores=output_scores, + return_dict_in_generate=return_dict_in_generate, + output_hidden_states=output_hidden_states, + output_attentions=output_attentions, + **kwargs, + ) + + self.coarse_semantic_pad_token = coarse_semantic_pad_token + self.coarse_rate_hz = coarse_rate_hz + self.n_coarse_codebooks = n_coarse_codebooks + self.coarse_infer_token = coarse_infer_token + self.max_coarse_input_length = max_coarse_input_length + self.max_coarse_history = max_coarse_history + self.sliding_window_len = sliding_window_len + + +class BarkFineGenerationConfig(GenerationConfig): + model_type = "fine_acoustics" + + def __init__( + self, + temperature=1.0, + max_fine_history_length=512, + max_fine_input_length=1024, + n_fine_codebooks=8, + **kwargs, + ): + """Class that holds a generation configuration for [`BarkFineModel`]. + + [`BarkFineModel`] is an autoencoder model, so should not usually be used for generation. However, under the + hood, it uses `temperature` when used by [`BarkModel`] + + This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the + documentation from [`GenerationConfig`] for more information. + + Args: + temperature (`float`, *optional*): + The value used to modulate the next token probabilities. + max_fine_history_length (`int`, *optional*, defaults to 512): + Max length of the fine history vector. + max_fine_input_length (`int`, *optional*, defaults to 1024): + Max length of fine input vector. + n_fine_codebooks (`int`, *optional*, defaults to 8): + Number of codebooks used. + """ + super().__init__(temperature=temperature) + + self.max_fine_history_length = max_fine_history_length + self.max_fine_input_length = max_fine_input_length + self.n_fine_codebooks = n_fine_codebooks + + def validate(self, **kwargs): + """ + Overrides GenerationConfig.validate because BarkFineGenerationConfig don't use any parameters outside + temperature. + """ + pass + + +class BarkGenerationConfig(GenerationConfig): + model_type = "bark" + is_composition = True + + # TODO (joao): nested from_dict + + def __init__( + self, + semantic_config: Dict = None, + coarse_acoustics_config: Dict = None, + fine_acoustics_config: Dict = None, + sample_rate=24_000, + codebook_size=1024, + **kwargs, + ): + """Class that holds a generation configuration for [`BarkModel`]. + + The [`BarkModel`] does not have a `generate` method, but uses this class to generate speeches with a nested + [`BarkGenerationConfig`] which uses [`BarkSemanticGenerationConfig`], [`BarkCoarseGenerationConfig`], + [`BarkFineGenerationConfig`]. + + This configuration inherit from [`GenerationConfig`] and can be used to control the model generation. Read the + documentation from [`GenerationConfig`] for more information. + + Args: + semantic_config (`Dict`, *optional*): + Semantic generation configuration. + coarse_acoustics_config (`Dict`, *optional*): + Coarse generation configuration. + fine_acoustics_config (`Dict`, *optional*): + Fine generation configuration. + sample_rate (`int`, *optional*, defaults to 24_000): + Sample rate. + codebook_size (`int`, *optional*, defaults to 1024): + Vector length for each codebook. + """ + if semantic_config is None: + semantic_config = {} + logger.info("semantic_config is None. 
initializing the semantic model with default values.") + + if coarse_acoustics_config is None: + coarse_acoustics_config = {} + logger.info("coarse_acoustics_config is None. initializing the coarse model with default values.") + + if fine_acoustics_config is None: + fine_acoustics_config = {} + logger.info("fine_acoustics_config is None. initializing the fine model with default values.") + + self.semantic_config = BarkSemanticGenerationConfig(**semantic_config) + self.coarse_acoustics_config = BarkCoarseGenerationConfig(**coarse_acoustics_config) + self.fine_acoustics_config = BarkFineGenerationConfig(**fine_acoustics_config) + + self.sample_rate = sample_rate + self.codebook_size = codebook_size + + @classmethod + def from_sub_model_configs( + cls, + semantic_config: BarkSemanticGenerationConfig, + coarse_acoustics_config: BarkCoarseGenerationConfig, + fine_acoustics_config: BarkFineGenerationConfig, + **kwargs, + ): + r""" + Instantiate a [`BarkGenerationConfig`] (or a derived class) from bark sub-models generation configuration. + + Returns: + [`BarkGenerationConfig`]: An instance of a configuration object + """ + return cls( + semantic_config=semantic_config.to_dict(), + coarse_acoustics_config=coarse_acoustics_config.to_dict(), + fine_acoustics_config=fine_acoustics_config.to_dict(), + **kwargs, + ) + + def to_dict(self): + """ + Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`]. + + Returns: + `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, + """ + output = copy.deepcopy(self.__dict__) + + output["semantic_config"] = self.semantic_config.to_dict() + output["coarse_acoustics_config"] = self.coarse_acoustics_config.to_dict() + output["fine_acoustics_config"] = self.fine_acoustics_config.to_dict() + + output["model_type"] = self.__class__.model_type + return output diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/modeling_bark.py b/venv/lib/python3.10/site-packages/transformers/models/bark/modeling_bark.py new file mode 100644 index 0000000000000000000000000000000000000000..a40ce7941050243ad3180d878cf9b241a536de46 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bark/modeling_bark.py @@ -0,0 +1,1908 @@ +# coding=utf-8 +# Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
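Before the sub-model classes, a minimal end-to-end sketch of how the pieces are meant to be used together; it assumes the public `suno/bark-small` checkpoint and the standard `transformers` processor API, neither of which is defined in this diff.

from transformers import AutoProcessor, BarkModel

processor = AutoProcessor.from_pretrained("suno/bark-small")
model = BarkModel.from_pretrained("suno/bark-small")

# generate() chains the semantic, coarse and fine sub-models defined below,
# then decodes the fine codebooks to a waveform with the Encodec codec model.
inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
audio = model.generate(**inputs).cpu().numpy().squeeze()
sample_rate = model.generation_config.sample_rate  # 24_000 by default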
+""" PyTorch BARK model.""" +import math +from typing import Dict, Optional, Tuple, Union + +import numpy as np +import torch +from torch import nn +from torch.nn import functional as F + +from ...generation.logits_process import ( + AlternatingCodebooksLogitsProcessor, + BarkEosPrioritizerLogitsProcessor, + SuppressTokensLogitsProcessor, +) +from ...modeling_attn_mask_utils import _prepare_4d_attention_mask +from ...modeling_outputs import CausalLMOutputWithPast, MaskedLMOutput +from ...modeling_utils import PreTrainedModel, get_parameter_device +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_accelerate_available, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, +) +from ..auto import AutoModel +from .configuration_bark import ( + BarkCoarseConfig, + BarkConfig, + BarkFineConfig, + BarkSemanticConfig, + BarkSubModelConfig, +) +from .generation_configuration_bark import ( + BarkCoarseGenerationConfig, + BarkFineGenerationConfig, + BarkSemanticGenerationConfig, +) + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + + +_CHECKPOINT_FOR_DOC = "suno/bark-small" +_CONFIG_FOR_DOC = "BarkConfig" + + +from ..deprecated._archive_maps import BARK_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +class BarkSelfAttention(nn.Module): + # adapted from GPTNeoSelfAttention and Bark code + # BarkSelfAttention can have two attention type, i.e full attention or causal attention + + def __init__(self, config, is_causal=False): + super().__init__() + + # regularization + self.dropout = config.dropout + self.attn_dropout = nn.Dropout(config.dropout) + self.resid_dropout = nn.Dropout(config.dropout) + + self.embed_dim = config.hidden_size + self.num_heads = config.num_heads + self.head_dim = self.embed_dim // self.num_heads + + if config.hidden_size % config.num_heads != 0: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." 
+ ) + + # key, query, value projections for all heads, but in a batch + self.att_proj = nn.Linear(config.hidden_size, 3 * config.hidden_size, bias=config.bias) + # output projection + self.out_proj = nn.Linear(config.hidden_size, config.hidden_size, bias=config.bias) + + self.is_causal = is_causal + if is_causal: + block_size = config.block_size + bias = torch.tril(torch.ones((block_size, block_size), dtype=bool)).view(1, 1, block_size, block_size) + self.register_buffer("bias", bias) + + # Copied from transformers.models.gpt_neo.modeling_gpt_neo.GPTNeoSelfAttention._split_heads + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(new_shape) + return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + + # re-assemble all head outputs side by side + # (batch, num_heads, seq_len, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) + tensor = tensor.transpose(1, 2).contiguous() + tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) + + return tensor + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + # unlike GPTNeo's SelfAttention, divide by the square root of the dimension of the query and the key + attn_weights = torch.matmul(query, key.transpose(-1, -2)) * (1.0 / math.sqrt(self.head_dim)) + + if self.is_causal: + query_length, key_length = query.size(-2), key.size(-2) + + # fill the upper left part of the attention weights with inf + attn_weights = attn_weights.masked_fill( + self.bias[:, :, key_length - query_length : key_length, :key_length] == 0, + torch.finfo(attn_weights.dtype).min, + ) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + attn_weights = attn_weights.to(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + # (batch, num_heads, seq_len, seq_len) x (batch, num_heads, seq_len, attn_head_size) + # -> (batch, num_heads, seq_len, attn_head_size) + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def forward( + self, + hidden_states, + attention_mask=None, + past_key_values=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if past_key_values is not None: + past_key = past_key_values[0] + past_value = past_key_values[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + attn_output = self.out_proj(attn_output) + attn_output = 
self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs + + +class BarkSelfFlashAttention2(BarkSelfAttention): + """ + Bark flash attention module. This module inherits from `BarkSelfAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(new_shape) + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim - (batch, seq_length, head, head_features) + return tensor + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + # re-assemble all head outputs side by side + # (batch, seq_len, num_heads, attn_head_size) -> (batch, seq_len, num_heads*attn_head_size) + tensor = tensor.view(tensor.size()[:-2] + (num_heads * attn_head_size,)) + return tensor + + def forward( + self, + hidden_states, + attention_mask=None, + past_key_values=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + batch_size, query_len, _ = hidden_states.size() + + # calculate query, key, values for all heads in batch and move head forward to be the batch dim + query, key, value = self.att_proj(hidden_states).split(self.embed_dim, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if past_key_values is not None: + # (batch, head, seq_length, head_features) -> (batch, seq_length, head, head_features) + past_key = past_key_values[0].transpose(1, 2) + past_value = past_key_values[1].transpose(1, 2) + # and merge on seq_length + key = torch.cat((past_key, key), dim=1) + value = torch.cat((past_value, value), dim=1) + + if use_cache is True: + # (batch, head, seq_length, head_features) + present = (key.transpose(1, 2), value.transpose(1, 2)) + else: + present = None + + attn_output = self._flash_attention_forward(query, key, value, attention_mask, query_len, dropout=self.dropout) + + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + attn_weights = None + outputs += (attn_weights,) + + return outputs + + # Copied from 
transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
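# For other query lengths with padding, unpad_input drops the padded positions
# and returns the packed tokens together with their flat indices, the cumulative
# sequence lengths (cu_seqlens_q) and the longest sequence in the batch, which is
# exactly the layout flash_attn_varlen_func expects.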
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +BARK_ATTENTION_CLASSES = { + "eager": BarkSelfAttention, + "flash_attention_2": BarkSelfFlashAttention2, +} + + +class BarkLayerNorm(nn.Module): + """LayerNorm but with an optional bias. PyTorch doesn't support simply bias=False.""" + + def __init__(self, hidden_size, bias=True): + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.bias = nn.Parameter(torch.zeros(hidden_size)) if bias else None + + def forward(self, input): + return F.layer_norm(input, self.weight.shape, self.weight, self.bias, eps=1e-5) + + +class BarkMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.in_proj = nn.Linear(config.hidden_size, 4 * config.hidden_size, bias=config.bias) + self.out_proj = nn.Linear(4 * config.hidden_size, config.hidden_size, bias=config.bias) + self.dropout = nn.Dropout(config.dropout) + self.gelu = nn.GELU() + + def forward(self, hidden_states): + hidden_states = self.in_proj(hidden_states) + hidden_states = self.gelu(hidden_states) + hidden_states = self.out_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class BarkBlock(nn.Module): + def __init__(self, config, is_causal=False): + super().__init__() + + if is_causal: + # if causal, uses handmade LayerNorm, so that the layerNorm bias is optional + # this handmade layerNorm is used to stick with Bark choice of leaving optional bias in + # AutoRegressive models (corresponding to the "Text" and the "Coarse" modules) + self.layernorm_1 = BarkLayerNorm(config.hidden_size, bias=config.bias) + self.layernorm_2 = BarkLayerNorm(config.hidden_size, bias=config.bias) + else: + self.layernorm_1 = nn.LayerNorm(config.hidden_size) + self.layernorm_2 = nn.LayerNorm(config.hidden_size) + + self.attn = BARK_ATTENTION_CLASSES[config._attn_implementation](config, is_causal=is_causal) + + self.mlp = BarkMLP(config) + + def forward( + self, + hidden_states, + past_key_values=None, + attention_mask=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + intermediary_hidden_states = self.layernorm_1(hidden_states) + + attn_outputs = self.attn( + intermediary_hidden_states, + past_key_values=past_key_values, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + attn_output = attn_outputs[0] # output_attn: output, present_key_values, (attn_weights) + outputs = attn_outputs[1:] + + intermediary_hidden_states = hidden_states + attn_output + intermediary_hidden_states = intermediary_hidden_states + self.mlp( + self.layernorm_2(intermediary_hidden_states) + ) + + if use_cache: + outputs = (intermediary_hidden_states,) + outputs + else: + outputs = (intermediary_hidden_states,) + outputs[1:] + + return outputs # hidden_states, ((present), attentions) + + +class BarkPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. 
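Because the blocks above pick their attention class from `BARK_ATTENTION_CLASSES` via `config._attn_implementation`, a short sketch of opting into the flash-attention path; the checkpoint name, dtype and device are illustrative assumptions rather than anything mandated by this file.

import torch
from transformers import BarkModel

# Requesting "flash_attention_2" makes every BarkBlock instantiate
# BarkSelfFlashAttention2 instead of the eager BarkSelfAttention.
model = BarkModel.from_pretrained(
    "suno/bark-small",
    torch_dtype=torch.float16,
    attn_implementation="flash_attention_2",
).to("cuda")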
+ """ + + config_class = BarkConfig + supports_gradient_checkpointing = False + _supports_flash_attn_2 = True + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear,)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + + # if has _hf_hook, has been offloaded so the device has to be found in the hook + if not hasattr(self, "_hf_hook"): + return get_parameter_device(self) + for module in self.modules(): + if ( + hasattr(module, "_hf_hook") + and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None + ): + return torch.device(module._hf_hook.execution_device) + + return get_parameter_device(self) + + +BARK_MODEL_START_DOCSTRING = """ + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`{config}`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +BARK_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`BarkConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +BARK_FINE_INPUTS_DOCSTRING = r""" + Args: + codebook_idx (`int`): + Index of the codebook that will be predicted. + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length, number_of_codebooks)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. 
Initially, indices of the first two codebooks are obtained from the `coarse` sub-model. The rest is + predicted recursively by attending the previously predicted channels. The model predicts on windows of + length 1024. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): NOT IMPLEMENTED YET. + input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. If + `past_key_values` is used, optionally only the last `input_embeds` have to be input (see + `past_key_values`). This is useful if you want more control over how to convert `input_ids` indices into + associated vectors than the model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + +BARK_CAUSAL_MODEL_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) + past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache` is passed or when `config.use_cache=True`): + Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape + `(batch_size, num_heads, sequence_length, embed_size_per_head)`. + + Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see + `past_key_values` input) to speed up sequential decoding. + + If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that + don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all + `input_ids` of shape `(batch_size, sequence_length)`. + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*): + Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + input_embeds (`torch.FloatTensor` of shape `(batch_size, input_sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. + Here, due to `Bark` particularities, if `past_key_values` is used, `input_embeds` will be ignored and you + have to use `input_ids`. If `past_key_values` is not used and `use_cache` is set to `True`, `input_embeds` + is used in priority instead of `input_ids`. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
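To make the `past_key_values` contract above concrete, a small hedged sketch of incremental decoding against one of the causal sub-models; `model` is assumed to be an already-loaded `BarkSemanticModel`, and the token ids are arbitrary placeholders.

import torch

# First pass over the prompt, requesting the key/value cache.
input_ids = torch.randint(0, 10_000, (1, 8))
out = model(input_ids, use_cache=True)

# Later passes feed only the newest token plus the cached states.
next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
out = model(next_token, past_key_values=out.past_key_values, use_cache=True)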
+""" + + +# GPT2-like autoregressive model +class BarkCausalModel(BarkPreTrainedModel): + config_class = BarkSubModelConfig + + def __init__(self, config): + super().__init__(config) + self.config = config + + # initialize as an autoregressive GPT-like model + self.input_embeds_layer = nn.Embedding(config.input_vocab_size, config.hidden_size) + self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) + + self.drop = nn.Dropout(config.dropout) + + self.layers = nn.ModuleList([BarkBlock(config, is_causal=True) for _ in range(config.num_layers)]) + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + + self.layernorm_final = BarkLayerNorm(config.hidden_size, bias=config.bias) + + self.lm_head = nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.input_embeds_layer + + def set_input_embeddings(self, new_embeddings): + self.input_embeds_layer = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, **kwargs): + input_embeds = kwargs.get("input_embeds", None) + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if past_key_values is not None: + # Omit tokens covered by past_key_values + seq_len = input_ids.shape[1] + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + + # input_embeds have already been used and is not required anymore + input_embeds = None + else: + if input_embeds is not None and kwargs.get("use_cache"): + seq_len = input_embeds.shape[1] + else: + seq_len = input_ids.shape[1] + + # ensure that attention_mask and position_ids shapes are aligned with the weird Bark hack of reducing + # sequence length on the first forward pass + if attention_mask is not None: + attention_mask = attention_mask[:, :seq_len] + if position_ids is not None: + position_ids = position_ids[:, :seq_len] + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + else: + position_ids = None + + if input_embeds is not None and kwargs.get("use_cache"): + return { + "input_ids": None, + "input_embeds": input_embeds, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + } + return { + "input_ids": input_ids, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + } + + @add_start_docstrings_to_model_forward(BARK_CAUSAL_MODEL_INPUTS_DOCSTRING) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.LongTensor] = None, + input_embeds: 
Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # Verify if input_embeds already exists + # then compute embeddings. + if input_ids is not None and input_embeds is not None: + raise ValueError("You cannot specify both input_ids and input_embeds at the same time") + elif input_embeds is not None and past_key_values is None: + # we want to return the input_embeds in priority so that it is in line with a weird hack + # of Bark which concatenate two bits of the input_embeds on the first forward pass of the semantic model + pass + elif input_ids is not None: + input_embeds = self.input_embeds_layer(input_ids) # token embeddings of shape (b, t, n_embd) + elif input_embeds is not None: + pass + else: + raise ValueError("You have to specify either input_ids or input_embeds") + + input_shape = input_embeds.size()[:-1] + batch_size = input_embeds.shape[0] + seq_length = input_shape[-1] + + device = input_ids.device if input_ids is not None else input_embeds.device + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.layers)) + else: + past_length = past_key_values[0][0].size(-2) + + if position_ids is None: + position_ids = torch.arange(past_length, seq_length + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) + + position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) + + # Attention mask. + if attention_mask is not None: + if batch_size <= 0: + raise ValueError("batch_size has to be defined and > 0") + if self._use_flash_attention_2: + attention_mask = attention_mask if 0 in attention_mask else None + else: + attention_mask = attention_mask.view(batch_size, -1) + # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] + # from_seq_length is 1 to easily broadcast + attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x num_heads x N x N + # head_mask has shape num_layers x batch x num_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.num_layers) + + hidden_states = self.drop(input_embeds + position_embeds) + output_shape = input_shape + (hidden_states.size(-1),) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
+ ) + use_cache = False + + present_key_values = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for i, (block, past_layer_key_values) in enumerate(zip(self.layers, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + outputs = self._gradient_checkpointing_func( + block.__call__, + hidden_states, + None, + attention_mask, + head_mask[i], + use_cache, + output_attentions, + ) + else: + outputs = block( + hidden_states, + past_key_values=past_layer_key_values, + attention_mask=attention_mask, + head_mask=head_mask[i], + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + + if use_cache: + present_key_values = present_key_values + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + + hidden_states = self.layernorm_final(hidden_states) + + hidden_states = hidden_states.view(output_shape) + + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + raise NotImplementedError( + "Training is not implemented yet for Bark - ensure you do not pass `labels` to the model." + ) + + if not return_dict: + return tuple( + v for v in [None, logits, present_key_values, all_hidden_states, all_self_attentions] if v is not None + ) + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=present_key_values, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + @staticmethod + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or + [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + """ + # Necessary for beam_search + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) + + +@add_start_docstrings( + """Bark semantic (or text) model. It shares the same architecture as the coarse model. + It is a GPT-2 like autoregressive model with a language modeling head on top.""", + BARK_MODEL_START_DOCSTRING.format(config="BarkSemanticConfig"), +) +class BarkSemanticModel(BarkCausalModel): + base_model_prefix = "semantic" + config_class = BarkSemanticConfig + + def generate( + self, + input_ids: torch.Tensor, + semantic_generation_config: BarkSemanticGenerationConfig = None, + history_prompt: Optional[Dict[str, torch.Tensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> torch.LongTensor: + """ + Generates text semantic tokens from an input prompt and an additional optional `Bark` speaker prompt. + + Args: + input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): + Input ids, i.e tokenized input sentences. Will be truncated up to + semantic_generation_config.max_input_semantic_length tokens. Note that the output audios will be as + long as the longest generation among the batch. + semantic_generation_config (`BarkSemanticGenerationConfig`): + Generation config indicating how to generate the semantic tokens. 
+ history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): + Optional `Bark` speaker prompt. + attention_mask (`Optional[torch.Tensor]`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + Returns: + torch.LongTensor: Output semantic tokens. + """ + if semantic_generation_config is None: + raise ValueError("`semantic_generation_config` has to be provided") + + batch_size = input_ids.shape[0] + + max_input_semantic_length = semantic_generation_config.max_input_semantic_length + + input_ids = input_ids + semantic_generation_config.text_encoding_offset + + if attention_mask is not None: + input_ids = input_ids.masked_fill((1 - attention_mask).bool(), semantic_generation_config.text_pad_token) + + if history_prompt is not None: + semantic_history = history_prompt["semantic_prompt"][-max_input_semantic_length:] + semantic_history = nn.functional.pad( + semantic_history, + (0, max_input_semantic_length - len(semantic_history)), + value=semantic_generation_config.semantic_pad_token, + mode="constant", + ) + else: + semantic_history = torch.tensor( + [semantic_generation_config.semantic_pad_token] * max_input_semantic_length, dtype=torch.int + ).to(self.device) + + semantic_history = torch.repeat_interleave(semantic_history[None], batch_size, dim=0) + + infer_array = torch.tensor( + [[semantic_generation_config.semantic_infer_token]] * batch_size, dtype=torch.int + ).to(self.device) + + input_embeds = torch.cat( + [ + self.input_embeds_layer(input_ids[:, :max_input_semantic_length]) + + self.input_embeds_layer(semantic_history[:, : max_input_semantic_length + 1]), + self.input_embeds_layer(infer_array), + ], + dim=1, + ) + + tokens_to_suppress = list( + range(semantic_generation_config.semantic_vocab_size, semantic_generation_config.semantic_pad_token) + ) + tokens_to_suppress.extend( + list(range(semantic_generation_config.semantic_pad_token + 1, self.config.output_vocab_size)) + ) + + suppress_tokens_logits_processor = SuppressTokensLogitsProcessor(tokens_to_suppress) + + min_eos_p = kwargs.get("min_eos_p", semantic_generation_config.min_eos_p) + early_stopping_logits_processor = BarkEosPrioritizerLogitsProcessor( + eos_token_id=semantic_generation_config.eos_token_id, min_eos_p=min_eos_p + ) + + # pass input_ids in order to stay consistent with the transformers generate method even though it is not used + # (except to get the input seq_len - that's why we keep the first 257 tokens) + semantic_output = super().generate( + torch.ones((batch_size, max_input_semantic_length + 1), dtype=torch.int).to(self.device), + input_embeds=input_embeds, + logits_processor=[suppress_tokens_logits_processor, early_stopping_logits_processor], + generation_config=semantic_generation_config, + **kwargs, + ) # size: 10048 + + # take the generated semantic tokens + semantic_output = semantic_output[:, max_input_semantic_length + 1 :] + + return semantic_output + + +@add_start_docstrings( + """Bark coarse acoustics model. + It shares the same architecture as the semantic (or text) model. 
It is a GPT-2 like autoregressive model with a + language modeling head on top.""", + BARK_MODEL_START_DOCSTRING.format(config="BarkCoarseConfig"), +) +class BarkCoarseModel(BarkCausalModel): + base_model_prefix = "coarse_acoustics" + config_class = BarkCoarseConfig + + def preprocess_histories( + self, + max_coarse_history: int, + semantic_to_coarse_ratio: int, + batch_size: int, + semantic_generation_config: int, + codebook_size: int, + history_prompt: Optional[Dict[str, torch.Tensor]] = None, + ): + """ + Preprocess the optional `Bark` speaker prompts before `self.generate`. + + Args: + max_coarse_history (`int`): + Maximum size of coarse tokens used. + semantic_to_coarse_ratio (`int`): + Ratio of semantic to coarse frequency + batch_size (`int`): + Batch size, i.e the number of samples. + semantic_generation_config (`BarkSemanticGenerationConfig`): + Generation config indicating how to generate the semantic tokens. + codebook_size (`int`): + Codebook channel size, i.e. the size of the output vocabulary per codebook channel. + history_prompt (`Optional[Dict[str,torch.Tensor]]`): + Optional `Bark` speaker prompt. + Returns: Returns: + `tuple(torch.FloatTensor)`: + - **x_semantic_history** (`torch.FloatTensor` -- Processed semantic speaker prompt. + - **x_coarse_history** (`torch.FloatTensor`) -- Processed coarse speaker prompt. + """ + if history_prompt is not None: + x_semantic_history = torch.repeat_interleave(history_prompt["semantic_prompt"][None], batch_size, dim=0) + # clone to avoid modifying history_prompt.coarse_prompt + x_coarse_history = history_prompt["coarse_prompt"].clone() + + # offset x_coarse_history + if codebook_size is not None: + for n in range(1, x_coarse_history.shape[0]): + # offset + x_coarse_history[n, :] += codebook_size * n + + # flatten x_coarse_history + x_coarse_history = torch.transpose(x_coarse_history, 0, 1).reshape(-1) + + x_coarse_history = x_coarse_history + semantic_generation_config.semantic_vocab_size + + x_coarse_history = torch.repeat_interleave(x_coarse_history[None], batch_size, dim=0) + # e.g: after SEMANTIC_VOCAB_SIZE (10000), 1024 tokens dedicated to first codebook, 1024 next tokens + # dedicated to second codebook. 
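# Concretely: a history token t from codebook n is remapped to
# t + n * codebook_size + semantic_vocab_size, so token 5 of codebook 1
# becomes 5 + 1024 + 10000 = 11029 in the flattened coarse vocabulary.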
+ + max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) + # trim histories correctly + n_semantic_hist_provided = min( + [ + max_semantic_history, + x_semantic_history.shape[1] - x_semantic_history.shape[1] % 2, + int(np.floor(x_coarse_history.shape[1] / semantic_to_coarse_ratio)), + ] + ) + + n_coarse_hist_provided = int(round(n_semantic_hist_provided * semantic_to_coarse_ratio)) + + x_semantic_history = x_semantic_history[:, -n_semantic_hist_provided:].int() + x_coarse_history = x_coarse_history[:, -n_coarse_hist_provided:].int() + # bit of a hack for time alignment (sounds better) - from Bark original implementation + x_coarse_history = x_coarse_history[:, :-2] + + else: + # shape: (batch_size, 0) + x_semantic_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) + x_coarse_history = torch.tensor([[]] * batch_size, dtype=torch.int).to(self.device) + + return x_semantic_history, x_coarse_history + + def generate( + self, + semantic_output: torch.Tensor, + semantic_generation_config: BarkSemanticGenerationConfig = None, + coarse_generation_config: BarkCoarseGenerationConfig = None, + codebook_size: int = 1024, + history_prompt: Optional[Dict[str, torch.Tensor]] = None, + return_output_lengths: Optional[bool] = None, + **kwargs, + ) -> Union[torch.LongTensor, Tuple[torch.LongTensor, torch.LongTensor]]: + """ + Generates coarse acoustics tokens from input text semantic tokens and an additional optional `Bark` speaker + prompt. + + Args: + semantic_output (`torch.Tensor` of shape (batch_size, seq_len), *optional*): + Input text semantic ids, i.e the output of `BarkSemanticModel.generate`. + semantic_generation_config (`BarkSemanticGenerationConfig`): + Generation config indicating how to generate the semantic tokens. + coarse_generation_config (`BarkCoarseGenerationConfig`): + Generation config indicating how to generate the coarse tokens. + codebook_size (`int`, *optional*, defaults to 1024): + Codebook channel size, i.e. the size of the output vocabulary per codebook channel. + history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): + Optional `Bark` speaker prompt. + return_output_lengths (`bool`, *optional*): + Whether or not to return the output lengths. Useful when batching. + Returns: + By default: + torch.LongTensor: Output coarse acoustics tokens. + If `return_output_lengths=True`: + `Tuple(torch.Tensor, torch.Tensor): The output coarse acoustics tokens, and the length of each sample + of the batch. 
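For intuition about the length bookkeeping in the body below, a small illustrative calculation with the default rates (the variable names here are hypothetical, not part of the model code):

import math

semantic_len = 100                                   # non-pad semantic tokens in one sample
ratio = 75 / 49.9 * 2                                # semantic_to_coarse_ratio with default configs
per_codebook = math.floor(semantic_len * ratio / 2)  # 150 coarse steps per codebook
coarse_len = round(per_codebook * 2)                 # 300 interleaved coarse tokens to generate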
+ """ + + if semantic_generation_config is None: + raise ValueError("`semantic_generation_config` has to be provided") + + if coarse_generation_config is None: + raise ValueError("`coarse_generation_config` has to be provided") + + max_coarse_input_length = coarse_generation_config.max_coarse_input_length + max_coarse_history = coarse_generation_config.max_coarse_history + sliding_window_len = coarse_generation_config.sliding_window_len + + # replace semantic_pad_token (eos_tok and pad_tok here) with coarse_semantic_pad_token i.e the pad_token + # used in the next model + semantic_output.masked_fill_( + semantic_output == semantic_generation_config.semantic_pad_token, + coarse_generation_config.coarse_semantic_pad_token, + ) + + semantic_to_coarse_ratio = ( + coarse_generation_config.coarse_rate_hz + / semantic_generation_config.semantic_rate_hz + * coarse_generation_config.n_coarse_codebooks + ) + max_semantic_history = int(np.floor(max_coarse_history / semantic_to_coarse_ratio)) + + output_lengths = (semantic_output != coarse_generation_config.coarse_semantic_pad_token).sum(1) + output_lengths = torch.floor( + output_lengths * semantic_to_coarse_ratio / coarse_generation_config.n_coarse_codebooks + ) + output_lengths = torch.round(output_lengths * coarse_generation_config.n_coarse_codebooks).int() + + max_generated_len = torch.max(output_lengths).item() + + batch_size = semantic_output.shape[0] + + x_semantic_history, x_coarse = self.preprocess_histories( + history_prompt=history_prompt, + max_coarse_history=max_coarse_history, + semantic_to_coarse_ratio=semantic_to_coarse_ratio, + batch_size=batch_size, + semantic_generation_config=semantic_generation_config, + codebook_size=codebook_size, + ) + base_semantic_idx = x_semantic_history.shape[1] + + semantic_output = torch.hstack([x_semantic_history, semantic_output]) + + n_window_steps = int(np.ceil(max_generated_len / sliding_window_len)) + + total_generated_len = 0 + + len_coarse_history = x_coarse.shape[1] + + for _ in range(n_window_steps): + semantic_idx = base_semantic_idx + int(round(total_generated_len / semantic_to_coarse_ratio)) + + # pad from right side + input_coarse = semantic_output[:, np.max([0, semantic_idx - max_semantic_history]) :] + input_coarse = input_coarse[:, :max_coarse_input_length] + input_coarse = F.pad( + input_coarse, + (0, max_coarse_input_length - input_coarse.shape[-1]), + "constant", + coarse_generation_config.coarse_semantic_pad_token, + ) + + input_coarse = torch.hstack( + [ + input_coarse, + torch.tensor([[coarse_generation_config.coarse_infer_token]] * batch_size).to(self.device), + x_coarse[:, -max_coarse_history:], + ] + ) + + alternatingLogitsProcessor = AlternatingCodebooksLogitsProcessor( + input_coarse.shape[1], + semantic_generation_config.semantic_vocab_size, + codebook_size, + ) + + output_coarse = super().generate( + input_coarse, + logits_processor=[alternatingLogitsProcessor], + max_new_tokens=min(sliding_window_len, max_generated_len - total_generated_len), + generation_config=coarse_generation_config, + **kwargs, + ) + + input_coarse_len = input_coarse.shape[1] + + x_coarse = torch.hstack([x_coarse, output_coarse[:, input_coarse_len:]]) + total_generated_len = x_coarse.shape[1] - len_coarse_history + + del output_coarse + + coarse_output = x_coarse[:, len_coarse_history:] + + if return_output_lengths: + return coarse_output, output_lengths + + return coarse_output + + +@add_start_docstrings( + """Bark fine acoustics model. 
It is a non-causal GPT-like model with `config.n_codes_total` embedding layers and + language modeling heads, one for each codebook.""", + BARK_MODEL_START_DOCSTRING.format(config="BarkFineConfig"), +) +class BarkFineModel(BarkPreTrainedModel): + base_model_prefix = "fine_acoustics" + config_class = BarkFineConfig + main_input_name = "codebook_idx" + + def __init__(self, config): + # non-causal gpt-like model with one embedding layer and one lm_head for each codebook of Encodec + super().__init__(config) + self.config = config + + # initialize a modified non causal GPT-like model + # note that for there is one embedding layer and one lm_head for each codebook of Encodec + self.input_embeds_layers = nn.ModuleList( + [nn.Embedding(config.input_vocab_size, config.hidden_size) for _ in range(config.n_codes_total)] + ) + self.position_embeds_layer = nn.Embedding(config.block_size, config.hidden_size) + + self.drop = nn.Dropout(config.dropout) + + self.layers = nn.ModuleList([BarkBlock(config, is_causal=False) for _ in range(config.num_layers)]) + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + + self.layernorm_final = nn.LayerNorm(config.hidden_size) + + self.lm_heads = nn.ModuleList( + [ + nn.Linear(config.hidden_size, config.output_vocab_size, bias=False) + for _ in range(config.n_codes_given, config.n_codes_total) + ] + ) + self.gradient_checkpointing = False + self.n_codes_total = config.n_codes_total + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + # one embedding layers for each codebook + return self.input_embeds_layers + + def set_input_embeddings(self, new_embeddings): + # one embedding layers for each codebook + self.input_embeds_layers = new_embeddings + + def get_output_embeddings(self): + # one lm_head for each codebook + return self.lm_heads + + def set_output_embeddings(self, new_output_embeddings): + # one lm_head for each codebook + self.lm_heads = new_output_embeddings + + def _resize_token_embeddings(self, new_num_tokens, pad_to_multiple_of=None): + old_embeddings_list = self.get_input_embeddings() + new_embeddings_list = nn.ModuleList( + [ + self._get_resized_embeddings(old_embeddings, new_num_tokens, pad_to_multiple_of) + for old_embeddings in old_embeddings_list + ] + ) + self.set_input_embeddings(new_embeddings_list) + new_num_tokens = new_embeddings_list[0].weight.shape[0] + + # if word embeddings are not tied, make sure that lm head is resized as well + if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings: + old_lm_head_list = self.get_output_embeddings() + new_lm_head_list = nn.ModuleList( + [self._get_resized_lm_head(old_lm_head, new_num_tokens) for old_lm_head in old_lm_head_list] + ) + self.set_output_embeddings(new_lm_head_list) + + return self.get_input_embeddings() + + def resize_token_embeddings( + self, new_num_tokens: Optional[int] = None, pad_to_multiple_of: Optional[int] = None + ) -> nn.Embedding: + """ + Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`. + + Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method. + + Arguments: + new_num_tokens (`int`, *optional*): + The number of new tokens in the embedding matrix. Increasing the size will add newly initialized + vectors at the end. Reducing the size will remove vectors from the end. 
If not provided or `None`, just + returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything. + pad_to_multiple_of (`int`, *optional*): + If set will pad the embedding matrix to a multiple of the provided value. + + This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability + `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128. For more + details about this, or help on choosing the correct value for resizing, refer to this guide: + https://docs.nvidia.com/deeplearning/performance/dl-performance-matrix-multiplication/index.html#requirements-tc + + Return: + `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model. + """ + model_embeds = self._resize_token_embeddings(new_num_tokens, pad_to_multiple_of) + if new_num_tokens is None and pad_to_multiple_of is None: + return model_embeds + + # Update base model and current model config + self.config.output_vocab_size = model_embeds[0].weight.shape[0] + self.config.vocab_size = model_embeds[0].weight.shape[0] + self.output_vocab_size = model_embeds[0].weight.shape[0] + self.vocab_size = model_embeds[0].weight.shape[0] + + # Tie weights again if needed + self.tie_weights() + + return model_embeds + + def tie_weights(self): + """ + Tie the weights between the input embeddings list and the output embeddings list. + + If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the + weights instead. + """ + if getattr(self.config, "tie_word_embeddings", True): + self._tied_weights_keys = [] + output_embeddings = self.get_output_embeddings() + input_embeddings = self.get_input_embeddings() + + for i in range(self.config.n_codes_total - self.config.n_codes_given): + # self.input_embeds_layers[i + 1].weight = self.lm_heads[i].weight + self._tie_or_clone_weights(output_embeddings[i], input_embeddings[i + 1]) + self._tied_weights_keys.append(f"lm_heads.{i}.weight") + + for module in self.modules(): + if hasattr(module, "_tie_weights"): + module._tie_weights() + + @add_start_docstrings_to_model_forward(BARK_FINE_INPUTS_DOCSTRING) + def forward( + self, + codebook_idx: int, # an additionnal idx corresponding to the id of the codebook that will be predicted + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + labels: Optional[torch.LongTensor] = None, + input_embeds: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if codebook_idx == 0: + raise ValueError("Cannot predict 0th codebook - 0th codebook should be predicted by the coarse model") + + if input_ids is not None and input_embeds is not None: + raise ValueError("You cannot specify both input_ids and input_embeds at the same time") + + if input_ids is None and input_embeds is None: + raise ValueError("You have to specify either input_ids or input_embeds") + + if input_ids is not None: + # the 
input_embeddings are the sum of the j previous codebooks embeddings before + # the current codebook_idx codebook + + # forward the GPT model itself + input_embeds = [ + input_embeds_layer(input_ids[:, :, i]).unsqueeze(-1) + for i, input_embeds_layer in enumerate(self.input_embeds_layers) + ] # token embeddings of shape (b, t, n_embd) + input_embeds = torch.cat(input_embeds, dim=-1) + input_embeds = input_embeds[:, :, :, : codebook_idx + 1].sum(dim=-1) + + input_shape = input_embeds.size()[:-1] + batch_size = input_embeds.shape[0] + seq_length = input_shape[1] + + device = input_ids.device if input_ids is not None else input_embeds.device + + if position_ids is None: + position_ids = torch.arange(0, seq_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) # shape (1, seq_length) + + position_embeds = self.position_embeds_layer(position_ids) # position embeddings of shape (1, t, n_embd) + + # Attention mask. + if attention_mask is not None: + if batch_size <= 0: + raise ValueError("batch_size has to be defined and > 0") + if self._use_flash_attention_2: + attention_mask = attention_mask if 0 in attention_mask else None + else: + # [bsz, to_seq_length] -> [bsz, 1, 1, to_seq_length] + # from_seq_length is 1 to easily broadcast + attention_mask = _prepare_4d_attention_mask(attention_mask, input_embeds.dtype, tgt_len=1) + + head_mask = self.get_head_mask(head_mask, self.config.num_layers) + + hidden_states = self.drop(input_embeds + position_embeds) + output_shape = input_shape + (hidden_states.size(-1),) + + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for i, block in enumerate(self.layers): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + outputs = block( + hidden_states, + attention_mask=attention_mask, + head_mask=head_mask[i], + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[1],) + + hidden_states = self.layernorm_final(hidden_states) + hidden_states = hidden_states.view(output_shape) + + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + logits = self.lm_heads[codebook_idx - self.config.n_codes_given](hidden_states) + + loss = None + if labels is not None: + raise NotImplementedError("Training is not implemented yet") + + if not return_dict: + return tuple(v for v in [None, logits, all_hidden_states, all_self_attentions] if v is not None) + + return MaskedLMOutput( + loss=loss, + logits=logits, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + def generate( + self, + coarse_output: torch.Tensor, + semantic_generation_config: BarkSemanticGenerationConfig = None, + coarse_generation_config: BarkCoarseGenerationConfig = None, + fine_generation_config: BarkFineGenerationConfig = None, + codebook_size: int = 1024, + history_prompt: Optional[Dict[str, torch.Tensor]] = None, + **kwargs, + ) -> torch.LongTensor: + """ + Generates fine acoustics tokens from input coarse acoustics tokens and an additional optional `Bark` speaker + prompt. + + Args: + coarse_output (`torch.Tensor` of shape (batch_size, seq_len)): + Input coarse acoustics ids, i.e the output of `BarkCoarseModel.generate`. + semantic_generation_config (`BarkSemanticGenerationConfig`): + Generation config indicating how to generate the semantic tokens. 
+ coarse_generation_config (`BarkCoarseGenerationConfig`): + Generation config indicating how to generate the coarse tokens. + fine_generation_config (`BarkFineGenerationConfig`): + Generation config indicating how to generate the fine tokens. + codebook_size (`int`, *optional*, defaults to 1024): + Codebook channel size, i.e. the size of the output vocabulary per codebook channel. + history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): + Optional `Bark` speaker prompt. + Returns: + torch.LongTensor: Output fine acoustics tokens. + """ + if semantic_generation_config is None: + raise ValueError("`semantic_generation_config` has to be provided") + + if coarse_generation_config is None: + raise ValueError("`coarse_generation_config` has to be provided") + + if fine_generation_config is None: + raise ValueError("`fine_generation_config` has to be provided") + + # since we don't really use GenerationConfig through the fine model (autoencoder) + # and since only temperature is used from the classic GenerationConfig parameters + # manually impose the kwargs priority over the generation config + temperature = kwargs.get("temperature", fine_generation_config.temperature) + + max_fine_history_length = fine_generation_config.max_fine_history_length + max_fine_input_length = fine_generation_config.max_fine_input_length + + # shape: (batch, n_coarse_codebooks * seq_len) + # new_shape: (batch, seq_len, n_coarse_codebooks) + coarse_output = coarse_output.view(coarse_output.shape[0], -1, coarse_generation_config.n_coarse_codebooks) + + # brings ids into the range [0, codebook_size -1] + coarse_output = torch.remainder(coarse_output - semantic_generation_config.semantic_vocab_size, codebook_size) + batch_size = coarse_output.shape[0] + + if history_prompt is not None: + x_fine_history = torch.repeat_interleave(history_prompt["fine_prompt"].T[None], batch_size, dim=0) + # transpose to get to shape (seq_len, n_fine_codebooks) + else: + x_fine_history = None + + n_coarse = coarse_generation_config.n_coarse_codebooks + + # pad the last 6th codebooks + fine_input = F.pad( + coarse_output, + (0, fine_generation_config.n_fine_codebooks - n_coarse), + "constant", + codebook_size, + ) + + # prepend history if available (max max_fine_history_length) + if x_fine_history is not None: + fine_input = torch.cat([x_fine_history[:, -max_fine_history_length:, :], fine_input], dim=1) + + # len of the fine_history that has been added to fine_input + n_history = x_fine_history[:, -max_fine_history_length:, :].shape[1] + else: + n_history = 0 + + n_remove_from_end = 0 + # need to pad if too short (since non-causal model) + if fine_input.shape[1] < max_fine_input_length: + n_remove_from_end = max_fine_input_length - fine_input.shape[1] + fine_input = F.pad(fine_input, (0, 0, 0, n_remove_from_end), mode="constant", value=codebook_size) + + # we can be lazy about fractional loop and just keep overwriting codebooks. + # seems that coarse_output.shape[1] - (max_fine_input_length - n_history) is equal to minus n_remove_from_end + # So if we needed to pad because too short, n_loops is always 1 (because n_remove_from_end > 0) + # If not, we loop over at least twice. 
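# Worked example of the loop count computed below (illustrative; 1024 and 512 are the usual fine-stage
# defaults for `max_fine_input_length` and `max_fine_history_length`, assumed here rather than read from
# the generation config):
#   - long input:  n_history = 512, coarse seq_len = 1500
#       n_loops = max(0, ceil((1500 - (1024 - 512)) / 512)) + 1 = max(0, 2) + 1 = 3
#   - short input: n_history = 0, coarse seq_len = 300 (padded with n_remove_from_end = 724)
#       n_loops = max(0, ceil((300 - 1024) / 512)) + 1 = max(0, -1) + 1 = 1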
+ + n_loops = (coarse_output.shape[1] - (max_fine_input_length - n_history)) / max_fine_history_length + n_loops = int(np.ceil(n_loops)) + n_loops = max(0, n_loops) + 1 + + for n_outer in range(n_loops): + start_idx = min([n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_input_length]) + + start_fill_idx = min( + [n_history + n_outer * max_fine_history_length, fine_input.shape[1] - max_fine_history_length] + ) + rel_start_fill_idx = start_fill_idx - start_idx + input_buffer = fine_input[:, start_idx : start_idx + max_fine_input_length, :] + for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): + logits = self.forward(n_inner, input_buffer).logits + if temperature is None or temperature == 1.0: + relevant_logits = logits[:, rel_start_fill_idx:, :codebook_size] + codebook_preds = torch.argmax(relevant_logits, -1) + else: + relevant_logits = logits[:, :, :codebook_size] / temperature + # apply softmax + probs = F.softmax(relevant_logits, dim=-1)[:, rel_start_fill_idx:max_fine_input_length] + # reshape to 2D: (batch_size, seq_len, codebook_size) -> (batch_size*seq_len, codebook_size) + probs = probs.reshape((-1, codebook_size)) + # multinomial then reshape : (batch_size*seq_len)-> (batch_size,seq_len) + codebook_preds = torch.multinomial(probs, num_samples=1).view(batch_size, -1) + codebook_preds = codebook_preds.to(torch.int32) + input_buffer[:, rel_start_fill_idx:, n_inner] = codebook_preds + del logits, codebook_preds + + # transfer into fine_input + for n_inner in range(n_coarse, fine_generation_config.n_fine_codebooks): + fine_input[ + :, start_fill_idx : start_fill_idx + (max_fine_input_length - rel_start_fill_idx), n_inner + ] = input_buffer[:, rel_start_fill_idx:, n_inner] + del input_buffer + + fine_input = fine_input.transpose(1, 2)[:, :, n_history:] + if n_remove_from_end > 0: + fine_input = fine_input[:, :, :-n_remove_from_end] + + if fine_input.shape[-1] != coarse_output.shape[-2]: + raise ValueError("input and output should have the same seq_len") + + return fine_input + + +@add_start_docstrings( + """ + The full Bark model, a text-to-speech model composed of 4 sub-models: + - [`BarkSemanticModel`] (also referred to as the 'text' model): a causal auto-regressive transformer model that + takes + as input tokenized text, and predicts semantic text tokens that capture the meaning of the text. + - [`BarkCoarseModel`] (also refered to as the 'coarse acoustics' model), also a causal autoregressive transformer, + that takes into input the results of the last model. It aims at regressing the first two audio codebooks necessary + to `encodec`. + - [`BarkFineModel`] (the 'fine acoustics' model), this time a non-causal autoencoder transformer, which iteratively + predicts the last codebooks based on the sum of the previous codebooks embeddings. + - having predicted all the codebook channels from the [`EncodecModel`], Bark uses it to decode the output audio + array. + + It should be noted that each of the first three modules can support conditional speaker embeddings to condition the + output sound according to specific predefined voice. 
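    Schematically, [`BarkModel.generate`] chains these sub-models as follows (an illustrative sketch, with
    generation configs and speaker prompts omitted):

    ```python
    # sketch of the pipeline implemented by `BarkModel.generate` (arguments omitted)
    # semantic_tokens = model.semantic.generate(input_ids, ...)
    # coarse_tokens = model.coarse_acoustics.generate(semantic_tokens, ...)
    # fine_tokens = model.fine_acoustics.generate(coarse_tokens, ...)
    # audio = model.codec_decode(fine_tokens)
    ```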
+ """, + BARK_START_DOCSTRING, +) +class BarkModel(BarkPreTrainedModel): + config_class = BarkConfig + + def __init__(self, config): + super().__init__(config) + + self.semantic = BarkSemanticModel(config.semantic_config) + self.coarse_acoustics = BarkCoarseModel(config.coarse_acoustics_config) + self.fine_acoustics = BarkFineModel(config.fine_acoustics_config) + + self.codec_model = AutoModel.from_config(config.codec_config) + + self.config = config + + @property + def device(self) -> torch.device: + """ + `torch.device`: The device on which the module is (assuming that all the module parameters are on the same + device). + """ + # for bark_model, device must be verified on its sub-models + # if has _hf_hook, has been offloaded so the device has to be found in the hook + if not hasattr(self.semantic, "_hf_hook"): + return get_parameter_device(self) + for module in self.semantic.modules(): + if ( + hasattr(module, "_hf_hook") + and hasattr(module._hf_hook, "execution_device") + and module._hf_hook.execution_device is not None + ): + return torch.device(module._hf_hook.execution_device) + + def enable_cpu_offload(self, gpu_id: Optional[int] = 0): + r""" + Offloads all sub-models to CPU using accelerate, reducing memory usage with a low impact on performance. This + method moves one whole sub-model at a time to the GPU when it is used, and the sub-model remains in GPU until + the next sub-model runs. + + Args: + gpu_id (`int`, *optional*, defaults to 0): + GPU id on which the sub-models will be loaded and offloaded. + """ + if is_accelerate_available(): + from accelerate import cpu_offload_with_hook + else: + raise ImportError("`enable_model_cpu_offload` requires `accelerate`.") + + device = torch.device(f"cuda:{gpu_id}") + + if self.device.type != "cpu": + self.to("cpu") + torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) + + # this layer is used outside the first foward pass of semantic so need to be loaded before semantic + self.semantic.input_embeds_layer, _ = cpu_offload_with_hook(self.semantic.input_embeds_layer, device) + + hook = None + for cpu_offloaded_model in [ + self.semantic, + self.coarse_acoustics, + self.fine_acoustics, + ]: + _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook) + + self.fine_acoustics_hook = hook + + _, hook = cpu_offload_with_hook(self.codec_model, device, prev_module_hook=hook) + + # We'll offload the last model manually. 
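# Typical usage of CPU offloading (illustrative sketch; requires `accelerate` to be installed, and
# "suno/bark-small" is just an example checkpoint):
#   model = BarkModel.from_pretrained("suno/bark-small")
#   model.enable_cpu_offload()
#   audio = model.generate(**inputs)  # each sub-model is moved to the GPU only while it runs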
+ self.codec_model_hook = hook + + def codec_decode(self, fine_output, output_lengths=None): + """Turn quantized audio codes into audio array using encodec.""" + + fine_output = fine_output.transpose(0, 1) + emb = self.codec_model.quantizer.decode(fine_output) + + if output_lengths is not None: + # encodec uses LSTMs which behaves differently with appended padding + # decoding with encodec takes around 0.1% of the total generation time + # to keep generation quality, we break batching + out = [sample[:, :l].unsqueeze(0) for (sample, l) in zip(emb, output_lengths)] + audio_arr = [self.codec_model.decoder(sample).squeeze() for sample in out] + else: + out = self.codec_model.decoder(emb) + audio_arr = out.squeeze(1) # squeeze the codebook dimension + + return audio_arr + + @torch.no_grad() + def generate( + self, + input_ids: Optional[torch.Tensor] = None, + history_prompt: Optional[Dict[str, torch.Tensor]] = None, + return_output_lengths: Optional[bool] = None, + **kwargs, + ) -> torch.LongTensor: + """ + Generates audio from an input prompt and an additional optional `Bark` speaker prompt. + + Args: + input_ids (`Optional[torch.Tensor]` of shape (batch_size, seq_len), *optional*): + Input ids. Will be truncated up to 256 tokens. Note that the output audios will be as long as the + longest generation among the batch. + history_prompt (`Optional[Dict[str,torch.Tensor]]`, *optional*): + Optional `Bark` speaker prompt. Note that for now, this model takes only one speaker prompt per batch. + kwargs (*optional*): Remaining dictionary of keyword arguments. Keyword arguments are of two types: + + - Without a prefix, they will be entered as `**kwargs` for the `generate` method of each sub-model. + - With a *semantic_*, *coarse_*, *fine_* prefix, they will be input for the `generate` method of the + semantic, coarse and fine respectively. It has the priority over the keywords without a prefix. + + This means you can, for example, specify a generation strategy for all sub-models except one. + return_output_lengths (`bool`, *optional*): + Whether or not to return the waveform lengths. Useful when batching. + Returns: + By default: + - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. + When `return_output_lengths=True`: + Returns a tuple made of: + - **audio_waveform** (`torch.Tensor` of shape (batch_size, seq_len)): Generated audio waveform. 
+ - **output_lengths** (`torch.Tensor` of shape (batch_size)): The length of each waveform in the batch + Example: + + ```python + >>> from transformers import AutoProcessor, BarkModel + + >>> processor = AutoProcessor.from_pretrained("suno/bark-small") + >>> model = BarkModel.from_pretrained("suno/bark-small") + + >>> # To add a voice preset, you can pass `voice_preset` to `BarkProcessor.__call__(...)` + >>> voice_preset = "v2/en_speaker_6" + + >>> inputs = processor("Hello, my dog is cute, I need him in my life", voice_preset=voice_preset) + + >>> audio_array = model.generate(**inputs, semantic_max_new_tokens=100) + >>> audio_array = audio_array.cpu().numpy().squeeze() + ``` + """ + # TODO (joao):workaround until nested generation config is compatible with PreTrained Model + # todo: dict + semantic_generation_config = BarkSemanticGenerationConfig(**self.generation_config.semantic_config) + coarse_generation_config = BarkCoarseGenerationConfig(**self.generation_config.coarse_acoustics_config) + fine_generation_config = BarkFineGenerationConfig(**self.generation_config.fine_acoustics_config) + + kwargs_semantic = { + # if "attention_mask" is set, it should not be passed to CoarseModel and FineModel + "attention_mask": kwargs.pop("attention_mask", None), + "min_eos_p": kwargs.pop("min_eos_p", None), + } + kwargs_coarse = {} + kwargs_fine = {} + for key, value in kwargs.items(): + if key.startswith("semantic_"): + key = key[len("semantic_") :] + kwargs_semantic[key] = value + elif key.startswith("coarse_"): + key = key[len("coarse_") :] + kwargs_coarse[key] = value + elif key.startswith("fine_"): + key = key[len("fine_") :] + kwargs_fine[key] = value + else: + # If the key is already in a specific config, then it's been set with a + # submodules specific value and we don't override + if key not in kwargs_semantic: + kwargs_semantic[key] = value + if key not in kwargs_coarse: + kwargs_coarse[key] = value + if key not in kwargs_fine: + kwargs_fine[key] = value + + # 1. Generate from the semantic model + semantic_output = self.semantic.generate( + input_ids, + history_prompt=history_prompt, + semantic_generation_config=semantic_generation_config, + **kwargs_semantic, + ) + + # 2. Generate from the coarse model + coarse_output = self.coarse_acoustics.generate( + semantic_output, + history_prompt=history_prompt, + semantic_generation_config=semantic_generation_config, + coarse_generation_config=coarse_generation_config, + codebook_size=self.generation_config.codebook_size, + return_output_lengths=return_output_lengths, + **kwargs_coarse, + ) + + output_lengths = None + if return_output_lengths: + coarse_output, output_lengths = coarse_output + # (batch_size, seq_len*coarse_codebooks) -> (batch_size, seq_len) + output_lengths = output_lengths // coarse_generation_config.n_coarse_codebooks + + # 3. "generate" from the fine model + output = self.fine_acoustics.generate( + coarse_output, + history_prompt=history_prompt, + semantic_generation_config=semantic_generation_config, + coarse_generation_config=coarse_generation_config, + fine_generation_config=fine_generation_config, + codebook_size=self.generation_config.codebook_size, + **kwargs_fine, + ) + + if getattr(self, "fine_acoustics_hook", None) is not None: + # Manually offload fine_acoustics to CPU + # and load codec_model to GPU + # since bark doesn't use codec_model forward pass + self.fine_acoustics_hook.offload() + self.codec_model = self.codec_model.to(self.device) + + # 4. 
Decode the output and generate audio array + audio = self.codec_decode(output, output_lengths) + + if getattr(self, "codec_model_hook", None) is not None: + # Offload codec_model to CPU + self.codec_model_hook.offload() + + if return_output_lengths: + output_lengths = [len(sample) for sample in audio] + audio = nn.utils.rnn.pad_sequence(audio, batch_first=True, padding_value=0) + return audio, output_lengths + + return audio + + @classmethod + def _check_and_enable_flash_attn_2( + cls, + config, + torch_dtype: Optional[torch.dtype] = None, + device_map: Optional[Union[str, Dict[str, int]]] = None, + hard_check_only: bool = False, + check_device_map: bool = False, + ): + """ + `_check_and_enable_flash_attn_2` originally don't expand flash attention enabling to the model + sub-configurations. We override the original method to make sure that Bark sub-models are using Flash Attention + if necessary. + + If you don't know about Flash Attention, check out the official repository of flash attention: + https://github.com/Dao-AILab/flash-attention + + For using Flash Attention 1.0 you can do it directly via the `BetterTransformer` API, have a look at this + specific section of the documentation to learn more about it: + https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#decoder-models + + The method checks if the current setup is compatible with Flash Attention as it requires the model to be in + half precision and not ran on CPU. + + If all checks pass and `hard_check_only` is False, the method will set the config attribute `_attn_implementation` to "flash_attention_2" so that the model + can initialize the correct attention module + """ + config = super()._check_and_enable_flash_attn_2( + config, torch_dtype, device_map, hard_check_only=hard_check_only, check_device_map=check_device_map + ) + + config.semantic_config._attn_implementation = config._attn_implementation + config.coarse_acoustics_config._attn_implementation = config._attn_implementation + config.fine_acoustics_config._attn_implementation = config._attn_implementation + return config diff --git a/venv/lib/python3.10/site-packages/transformers/models/bark/processing_bark.py b/venv/lib/python3.10/site-packages/transformers/models/bark/processing_bark.py new file mode 100644 index 0000000000000000000000000000000000000000..d58b89bf6f8f9ba89fa47b8ed6f84390918721b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/bark/processing_bark.py @@ -0,0 +1,286 @@ +# coding=utf-8 +# Copyright 2023 The Suno AI Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
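# Minimal usage sketch for the processor defined below (illustrative; the checkpoint name, preset name and
# array sizes are assumptions rather than values defined in this file):
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # a voice preset can also be passed directly as a dict of numpy arrays:
#   preset = {
#       "semantic_prompt": np.zeros(100, dtype=np.int64),      # 1-D array
#       "coarse_prompt": np.zeros((2, 300), dtype=np.int64),   # 2-D array
#       "fine_prompt": np.zeros((8, 300), dtype=np.int64),     # 2-D array
#   }
#   inputs = processor("Hello, my dog is cute", voice_preset=preset)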
+""" +Processor class for Bark +""" +import json +import os +from typing import Optional + +import numpy as np + +from ...feature_extraction_utils import BatchFeature +from ...processing_utils import ProcessorMixin +from ...utils import logging +from ...utils.hub import get_file_from_repo +from ..auto import AutoTokenizer + + +logger = logging.get_logger(__name__) + + +class BarkProcessor(ProcessorMixin): + r""" + Constructs a Bark processor which wraps a text tokenizer and optional Bark voice presets into a single processor. + + Args: + tokenizer ([`PreTrainedTokenizer`]): + An instance of [`PreTrainedTokenizer`]. + speaker_embeddings (`Dict[Dict[str]]`, *optional*): + Optional nested speaker embeddings dictionary. The first level contains voice preset names (e.g + `"en_speaker_4"`). The second level contains `"semantic_prompt"`, `"coarse_prompt"` and `"fine_prompt"` + embeddings. The values correspond to the path of the corresponding `np.ndarray`. See + [here](https://suno-ai.notion.site/8b8e8749ed514b0cbf3f699013548683?v=bc67cff786b04b50b3ceb756fd05f68c) for + a list of `voice_preset_names`. + + """ + + tokenizer_class = "AutoTokenizer" + attributes = ["tokenizer"] + + preset_shape = { + "semantic_prompt": 1, + "coarse_prompt": 2, + "fine_prompt": 2, + } + + def __init__(self, tokenizer, speaker_embeddings=None): + super().__init__(tokenizer) + + self.speaker_embeddings = speaker_embeddings + + @classmethod + def from_pretrained( + cls, pretrained_processor_name_or_path, speaker_embeddings_dict_path="speaker_embeddings_path.json", **kwargs + ): + r""" + Instantiate a Bark processor associated with a pretrained model. + + Args: + pretrained_model_name_or_path (`str` or `os.PathLike`): + This can be either: + + - a string, the *model id* of a pretrained [`BarkProcessor`] hosted inside a model repo on + huggingface.co. + - a path to a *directory* containing a processor saved using the [`~BarkProcessor.save_pretrained`] + method, e.g., `./my_model_directory/`. + speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`): + The name of the `.json` file containing the speaker_embeddings dictionnary located in + `pretrained_model_name_or_path`. If `None`, no speaker_embeddings is loaded. + **kwargs + Additional keyword arguments passed along to both + [`~tokenization_utils_base.PreTrainedTokenizer.from_pretrained`]. 
+        """
+
+        if speaker_embeddings_dict_path is not None:
+            speaker_embeddings_path = get_file_from_repo(
+                pretrained_processor_name_or_path,
+                speaker_embeddings_dict_path,
+                subfolder=kwargs.pop("subfolder", None),
+                cache_dir=kwargs.pop("cache_dir", None),
+                force_download=kwargs.pop("force_download", False),
+                proxies=kwargs.pop("proxies", None),
+                resume_download=kwargs.pop("resume_download", False),
+                local_files_only=kwargs.pop("local_files_only", False),
+                token=kwargs.pop("use_auth_token", None),
+                revision=kwargs.pop("revision", None),
+            )
+            if speaker_embeddings_path is None:
+                logger.warning(
+                    f"""`{os.path.join(pretrained_processor_name_or_path, speaker_embeddings_dict_path)}` does not exist,
+                    no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
+                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`."""
+                )
+                speaker_embeddings = None
+            else:
+                with open(speaker_embeddings_path) as speaker_embeddings_json:
+                    speaker_embeddings = json.load(speaker_embeddings_json)
+        else:
+            speaker_embeddings = None
+
+        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path, **kwargs)
+
+        return cls(tokenizer=tokenizer, speaker_embeddings=speaker_embeddings)
+
+    def save_pretrained(
+        self,
+        save_directory,
+        speaker_embeddings_dict_path="speaker_embeddings_path.json",
+        speaker_embeddings_directory="speaker_embeddings",
+        push_to_hub: bool = False,
+        **kwargs,
+    ):
+        """
+        Saves the attributes of this processor (tokenizer...) in the specified directory so that it can be reloaded
+        using the [`~BarkProcessor.from_pretrained`] method.
+
+        Args:
+            save_directory (`str` or `os.PathLike`):
+                Directory where the tokenizer files and the speaker embeddings will be saved (directory will be
+                created if it does not exist).
+            speaker_embeddings_dict_path (`str`, *optional*, defaults to `"speaker_embeddings_path.json"`):
+                The name of the `.json` file that will contain the speaker_embeddings nested path dictionary, if it
+                exists, and that will be located in `pretrained_model_name_or_path/speaker_embeddings_directory`.
+            speaker_embeddings_directory (`str`, *optional*, defaults to `"speaker_embeddings/"`):
+                The name of the folder in which the speaker_embeddings arrays will be saved.
+            push_to_hub (`bool`, *optional*, defaults to `False`):
+                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
+                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
+                namespace).
+            kwargs:
+                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
+ """ + if self.speaker_embeddings is not None: + os.makedirs(os.path.join(save_directory, speaker_embeddings_directory, "v2"), exist_ok=True) + + embeddings_dict = {} + + embeddings_dict["repo_or_path"] = save_directory + + for prompt_key in self.speaker_embeddings: + if prompt_key != "repo_or_path": + voice_preset = self._load_voice_preset(prompt_key) + + tmp_dict = {} + for key in self.speaker_embeddings[prompt_key]: + np.save( + os.path.join( + embeddings_dict["repo_or_path"], speaker_embeddings_directory, f"{prompt_key}_{key}" + ), + voice_preset[key], + allow_pickle=False, + ) + tmp_dict[key] = os.path.join(speaker_embeddings_directory, f"{prompt_key}_{key}.npy") + + embeddings_dict[prompt_key] = tmp_dict + + with open(os.path.join(save_directory, speaker_embeddings_dict_path), "w") as fp: + json.dump(embeddings_dict, fp) + + super().save_pretrained(save_directory, push_to_hub, **kwargs) + + def _load_voice_preset(self, voice_preset: str = None, **kwargs): + voice_preset_paths = self.speaker_embeddings[voice_preset] + + voice_preset_dict = {} + for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: + if key not in voice_preset_paths: + raise ValueError( + f"Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}]." + ) + + path = get_file_from_repo( + self.speaker_embeddings.get("repo_or_path", "/"), + voice_preset_paths[key], + subfolder=kwargs.pop("subfolder", None), + cache_dir=kwargs.pop("cache_dir", None), + force_download=kwargs.pop("force_download", False), + proxies=kwargs.pop("proxies", None), + resume_download=kwargs.pop("resume_download", False), + local_files_only=kwargs.pop("local_files_only", False), + token=kwargs.pop("use_auth_token", None), + revision=kwargs.pop("revision", None), + ) + if path is None: + raise ValueError( + f"""`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/"),voice_preset_paths[key])}` does not exists + , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} + embeddings.""" + ) + + voice_preset_dict[key] = np.load(path) + + return voice_preset_dict + + def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None): + for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]: + if key not in voice_preset: + raise ValueError(f"Voice preset unrecognized, missing {key} as a key.") + + if not isinstance(voice_preset[key], np.ndarray): + raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.") + + if len(voice_preset[key].shape) != self.preset_shape[key]: + raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.") + + def __call__( + self, + text=None, + voice_preset=None, + return_tensors="pt", + max_length=256, + add_special_tokens=False, + return_attention_mask=True, + return_token_type_ids=False, + **kwargs, + ): + """ + Main method to prepare for the model one or several sequences(s). This method forwards the `text` and `kwargs` + arguments to the AutoTokenizer's [`~AutoTokenizer.__call__`] to encode the text. The method also proposes a + voice preset which is a dictionary of arrays that conditions `Bark`'s output. `kwargs` arguments are forwarded + to the tokenizer and to `cached_file` method if `voice_preset` is a valid filename. + + Args: + text (`str`, `List[str]`, `List[List[str]]`): + The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings + (pretokenized string). 
If the sequences are provided as list of strings (pretokenized), you must set + `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). + voice_preset (`str`, `Dict[np.ndarray]`): + The voice preset, i.e the speaker embeddings. It can either be a valid voice_preset name, e.g + `"en_speaker_1"`, or directly a dictionnary of `np.ndarray` embeddings for each submodel of `Bark`. Or + it can be a valid file name of a local `.npz` single voice preset. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Acceptable values are: + + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + + Returns: + Tuple([`BatchEncoding`], [`BatchFeature`]): A tuple composed of a [`BatchEncoding`], i.e the output of the + `tokenizer` and a [`BatchFeature`], i.e the voice preset with the right tensors type. + """ + if voice_preset is not None and not isinstance(voice_preset, dict): + if ( + isinstance(voice_preset, str) + and self.speaker_embeddings is not None + and voice_preset in self.speaker_embeddings + ): + voice_preset = self._load_voice_preset(voice_preset) + + else: + if isinstance(voice_preset, str) and not voice_preset.endswith(".npz"): + voice_preset = voice_preset + ".npz" + + voice_preset = np.load(voice_preset) + + if voice_preset is not None: + self._validate_voice_preset_dict(voice_preset, **kwargs) + voice_preset = BatchFeature(data=voice_preset, tensor_type=return_tensors) + + encoded_text = self.tokenizer( + text, + return_tensors=return_tensors, + padding="max_length", + max_length=max_length, + return_attention_mask=return_attention_mask, + return_token_type_ids=return_token_type_ids, + add_special_tokens=add_special_tokens, + **kwargs, + ) + + if voice_preset is not None: + encoded_text["history_prompt"] = voice_preset + + return encoded_text diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/cohere/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f69d1e496d0e616b185c819fd847fcc019fc39 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cohere/__init__.py @@ -0,0 +1,77 @@ +# Copyright 2024 Cohere and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
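# The lazy-import pattern below keeps `import transformers` lightweight: `_import_structure` only lists the
# public symbols per submodule, and `_LazyModule` imports a submodule the first time one of its attributes is
# accessed. For example, `from transformers.models.cohere import CohereConfig` only triggers the import of
# `configuration_cohere`, while the torch-dependent `modeling_cohere` module is left untouched until
# `CohereModel` or `CohereForCausalLM` is requested.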
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_sentencepiece_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_cohere": ["COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CohereConfig"], +} + + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_cohere_fast"] = ["CohereTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_cohere"] = [ + "CohereForCausalLM", + "CohereModel", + "CoherePreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_cohere import COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP, CohereConfig + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_cohere_fast import CohereTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_cohere import ( + CohereForCausalLM, + CohereModel, + CoherePreTrainedModel, + ) + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d45d5406a9e9ef9e10c0cfc498ede25b4f801247 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/configuration_cohere.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/configuration_cohere.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d6b56c59971c5447243444b1b6f66b3252049b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/configuration_cohere.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/modeling_cohere.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/modeling_cohere.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7114e4886d8ab5b61fe833968568d4ee73e86b4e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/modeling_cohere.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/tokenization_cohere_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/tokenization_cohere_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12d9350c36595d049828b596fe4c20418122670f Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/cohere/__pycache__/tokenization_cohere_fast.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/configuration_cohere.py 
b/venv/lib/python3.10/site-packages/transformers/models/cohere/configuration_cohere.py new file mode 100644 index 0000000000000000000000000000000000000000..7ceca2b887af7d6529bb9a8ee3a8e65b25a8fe1f --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cohere/configuration_cohere.py @@ -0,0 +1,159 @@
+# coding=utf-8
+# Copyright 2024 Cohere team. All rights reserved.
+#
+# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+# and OPT implementations in this library. It has been modified from its
+# original forms to accommodate minor architectural differences compared
+# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+""" Cohere model configuration"""
+
+from ...configuration_utils import PretrainedConfig
+from ...utils import logging
+
+
+logger = logging.get_logger(__name__)
+
+COHERE_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+
+
+class CohereConfig(PretrainedConfig):
+    r"""
+    This is the configuration class to store the configuration of a [`CohereModel`]. It is used to instantiate a
+    Cohere model according to the specified arguments, defining the model architecture.
+
+    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+    documentation from [`PretrainedConfig`] for more information. Instantiating a configuration with the defaults
+    will yield a similar configuration to that of the
+    [CohereForAI/c4ai-command-r-v01](https://huggingface.co/CohereForAI/c4ai-command-r-v01) model.
+
+    Args:
+        vocab_size (`int`, *optional*, defaults to 256000):
+            Vocabulary size of the Cohere model. Defines the number of different tokens that can be represented by
+            the `inputs_ids` passed when calling [`CohereModel`].
+        hidden_size (`int`, *optional*, defaults to 8192):
+            Dimension of the hidden representations.
+        intermediate_size (`int`, *optional*, defaults to 22528):
+            Dimension of the MLP representations.
+        logit_scale (`float`, *optional*, defaults to 0.0625):
+            The scaling factor for the output logits.
+        num_hidden_layers (`int`, *optional*, defaults to 40):
+            Number of hidden layers in the Transformer decoder.
+        num_attention_heads (`int`, *optional*, defaults to 64):
+            Number of attention heads for each attention layer in the Transformer decoder.
+        num_key_value_heads (`int`, *optional*):
+            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+            `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+            constructed by meanpooling all the original heads within that group. For more details, check out [this
+            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+            `num_attention_heads`.
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`): + The non-linear activation function (function or string) in the decoder. + max_position_embeddings (`int`, *optional*, defaults to 8192): + The maximum sequence length that this model might ever be used with. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + pad_token_id (`int`, *optional*, defaults to 0): + Padding token id. + bos_token_id (`int`, *optional*, defaults to 5): + Beginning of stream token id. + eos_token_id (`int`, *optional*, defaults to 255001): + End of stream token id. + tie_word_embeddings (`bool`, *optional*, defaults to `True`): + Whether to tie weight embeddings + rope_theta (`float`, *optional*, defaults to 10000.0): + The base period of the RoPE embeddings. + attention_bias (`bool`, defaults to `False`, *optional*, defaults to `False`): + Whether to use a bias in the query, key, value and output projection layers during self-attention. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + use_qk_norm (`bool`, *optional*, defaults to `False`): + Whether to use query-key normalization in the attention + + ```python + >>> from transformers import CohereModel, CohereConfig + + >>> # Initializing a Cohere model configuration + >>> configuration = CohereConfig() + + >>> # Initializing a model from the Cohere configuration + >>> model = CohereModel(configuration) # doctest: +SKIP + + >>> # Accessing the model configuration + >>> configuration = model.config # doctest: +SKIP + ```""" + + model_type = "cohere" + keys_to_ignore_at_inference = ["past_key_values"] + + def __init__( + self, + vocab_size=256000, + hidden_size=8192, + intermediate_size=22528, + logit_scale=0.0625, + num_hidden_layers=40, + num_attention_heads=64, + num_key_value_heads=None, + hidden_act="silu", + max_position_embeddings=8192, + initializer_range=0.02, + layer_norm_eps=1e-5, + use_cache=True, + pad_token_id=0, + bos_token_id=5, + eos_token_id=255001, + tie_word_embeddings=True, + rope_theta=10000.0, + attention_bias=False, + attention_dropout=0.0, + use_qk_norm=False, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.logit_scale = logit_scale + self.intermediate_size = intermediate_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + + # for backward compatibility + if num_key_value_heads is None: + num_key_value_heads = num_attention_heads + + self.num_key_value_heads = num_key_value_heads + self.hidden_act = hidden_act + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.use_cache = use_cache + self.rope_theta = rope_theta + self.attention_bias = attention_bias + self.attention_dropout = attention_dropout + self.use_qk_norm = use_qk_norm + + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + tie_word_embeddings=tie_word_embeddings, + **kwargs, + ) diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/cohere/modeling_cohere.py b/venv/lib/python3.10/site-packages/transformers/models/cohere/modeling_cohere.py new file mode 100644 index 0000000000000000000000000000000000000000..950d45ea867a3080bb804565edff1b2bb0e09e97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cohere/modeling_cohere.py @@ -0,0 +1,1266 @@ +# coding=utf-8 +# Copyright 2024 Cohere team. All rights reserved. +# +# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX +# and OPT implementations in this library. It has been modified from its +# original forms to accommodate minor architectural differences compared +# to GPT-NeoX and OPT used by the Meta AI team that trained the model. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is based on the LLama model definition file in transformers + +"""PyTorch Cohere model.""" + +import math +import warnings +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import CrossEntropyLoss + +from ...activations import ACT2FN +from ...cache_utils import Cache, DynamicCache, StaticCache +from ...modeling_attn_mask_utils import AttentionMaskConverter +from ...modeling_outputs import ( + BaseModelOutputWithPast, + CausalLMOutputWithPast, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import ALL_LAYERNORM_LAYERS +from ...utils import ( + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + logging, + replace_return_docstrings, +) +from .configuration_cohere import CohereConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "CohereConfig" + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +class CohereLayerNorm(nn.Module): + def __init__(self, hidden_size=None, eps=1e-5, bias=False): + """The hidden size can be a tuple or an int. 
The tuple is used for QKNorm to normalize across head_dim""" + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + input_dtype = hidden_states.dtype + hidden_states = hidden_states.to(torch.float32) + mean = hidden_states.mean(-1, keepdim=True) + variance = (hidden_states - mean).pow(2).mean(-1, keepdim=True) + hidden_states = (hidden_states - mean) * torch.rsqrt(variance + self.variance_epsilon) + hidden_states = self.weight.to(torch.float32) * hidden_states + return hidden_states.to(input_dtype) + + +ALL_LAYERNORM_LAYERS.append(CohereLayerNorm) + + +class CohereRotaryEmbedding(nn.Module): + def __init__(self, dim, max_position_embeddings=2048, base=10000, device=None, scaling_factor=1.0): + super().__init__() + self.scaling_factor = scaling_factor + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, dtype=torch.int64).float().to(device) / self.dim)) + self.register_buffer("inv_freq", inv_freq, persistent=False) + + @torch.no_grad() + def forward(self, x, position_ids): + # x: [bs, num_attention_heads, seq_len, head_size] + inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1) + position_ids_expanded = position_ids[:, None, :].float() + + # Force float32 since bfloat16 loses precision on long contexts + # See https://github.com/huggingface/transformers/pull/29285 + device_type = x.device.type + device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu" + with torch.autocast(device_type=device_type, enabled=False): + freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2) + emb = torch.repeat_interleave(freqs, 2, dim=-1) + cos = emb.cos() + sin = emb.sin() + return cos, sin + + +def rotate_half(x): + # Split and rotate + x1 = x[..., ::2] + x2 = x[..., 1::2] + rot_x = torch.stack([-x2, x1], dim=-1).flatten(-2) + return rot_x + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1): + """Applies Rotary Position Embedding to the query and key tensors. + + Args: + q (`torch.Tensor`): The query tensor. + k (`torch.Tensor`): The key tensor. + cos (`torch.Tensor`): The cosine part of the rotary embedding. + sin (`torch.Tensor`): The sine part of the rotary embedding. + position_ids (`torch.Tensor`, *optional*): + Deprecated and unused. + unsqueeze_dim (`int`, *optional*, defaults to 1): + The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and + sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note + that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and + k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes + cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have + the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2. + Returns: + `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding. 
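    Example (an illustrative shape check; `dim=64` and the tensor sizes are arbitrary):

    ```python
    >>> import torch
    >>> rope = CohereRotaryEmbedding(dim=64)
    >>> q = torch.randn(1, 8, 10, 64)  # [batch, num_heads, seq_len, head_dim]
    >>> k = torch.randn(1, 8, 10, 64)
    >>> position_ids = torch.arange(10).unsqueeze(0)  # [batch, seq_len]
    >>> cos, sin = rope(q, position_ids)  # each of shape [batch, seq_len, head_dim]
    >>> q_rot, k_rot = apply_rotary_pos_emb(q, k, cos, sin)
    >>> q_rot.shape
    torch.Size([1, 8, 10, 64])
    ```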
+ """ + dtype = q.dtype + q = q.float() + k = k.float() + cos = cos.unsqueeze(unsqueeze_dim) + sin = sin.unsqueeze(unsqueeze_dim) + q_embed = (q * cos) + (rotate_half(q) * sin) + k_embed = (k * cos) + (rotate_half(k) * sin) + return q_embed.to(dtype=dtype), k_embed.to(dtype=dtype) + + +# Copied from transformers.models.llama.modeling_llama.LlamaMLP Llama->Cohere +class CohereMLP(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.hidden_size = config.hidden_size + self.intermediate_size = config.intermediate_size + self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=False) + self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=False) + self.act_fn = ACT2FN[config.hidden_act] + + # Ignore copy + def forward(self, x): + down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x)) + return down_proj + + +# Copied from transformers.models.llama.modeling_llama.repeat_kv +def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor: + """ + This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch, + num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim) + """ + batch, num_key_value_heads, slen, head_dim = hidden_states.shape + if n_rep == 1: + return hidden_states + hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim) + return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim) + + +class CohereAttention(nn.Module): + """Multi-headed attention from 'Attention Is All You Need' paper""" + + def __init__(self, config: CohereConfig, layer_idx: Optional[int] = None): + super().__init__() + self.config = config + self.layer_idx = layer_idx + if layer_idx is None: + logger.warning_once( + f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will " + "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` " + "when creating this class." + ) + + self.attention_dropout = config.attention_dropout + self.hidden_size = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.hidden_size // self.num_heads + self.num_key_value_heads = config.num_key_value_heads + self.num_key_value_groups = self.num_heads // self.num_key_value_heads + self.max_position_embeddings = config.max_position_embeddings + self.rope_theta = config.rope_theta + self.is_causal = True + self.use_qk_norm = config.use_qk_norm + + if (self.head_dim * self.num_heads) != self.hidden_size: + raise ValueError( + f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" + f" and `num_heads`: {self.num_heads})." 
+ ) + + if self.use_qk_norm: + # When sharding the model using Tensor Parallelism, need to be careful to use n_local_heads + self.q_norm = CohereLayerNorm(hidden_size=(self.num_heads, self.head_dim), eps=config.layer_norm_eps) + self.k_norm = CohereLayerNorm( + hidden_size=(self.num_key_value_heads, self.head_dim), eps=config.layer_norm_eps + ) + + self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias) + self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias) + self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias) + self._init_rope() + + # Ignore copy + def _init_rope(self): + self.rotary_emb = CohereRotaryEmbedding( + self.head_dim, + max_position_embeddings=self.max_position_embeddings, + base=self.rope_theta, + ) + + # Ignore copy + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + if self.use_qk_norm: + query_states = self.q_norm(query_states) + key_states = self.k_norm(key_states) + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + past_key_value = getattr(self, "past_key_value", past_key_value) + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; position_ids needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim) + + if attention_mask is not None: # no matter the length, we just slice it + causal_mask = attention_mask[:, :, :, : key_states.shape[-2]] + attn_weights = attn_weights + causal_mask + + # upcast attention to fp32 + attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype) + attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training) + attn_output = torch.matmul(attn_weights, value_states) + + if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim): + raise ValueError( + f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is" + f" {attn_output.size()}" + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + + 
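A quick aside on the rotary embedding the eager forward pass above just applied: Cohere rotates interleaved even/odd channel pairs (note the `::2` / `1::2` slicing in `rotate_half` and the `repeat_interleave` in `CohereRotaryEmbedding`), rather than the half-split layout some other models use. The sketch below is illustrative only, with arbitrary toy shapes, and simply checks that the rotation leaves per-vector norms unchanged.

```python
# Illustrative sketch only (toy shapes): the interleaved rotation used by
# rotate_half/apply_rotary_pos_emb above, checked for norm preservation.
import torch

def rotate_half(x):
    # (x0, x1, x2, x3, ...) -> (-x1, x0, -x3, x2, ...): pairs even/odd channels
    x1, x2 = x[..., ::2], x[..., 1::2]
    return torch.stack([-x2, x1], dim=-1).flatten(-2)

bsz, heads, seq, head_dim = 1, 2, 4, 8
q = torch.randn(bsz, heads, seq, head_dim)

# cos/sin built the same way as CohereRotaryEmbedding: each frequency is
# repeated twice (repeat_interleave) so it lines up with the (even, odd) pairs.
inv_freq = 1.0 / (10000 ** (torch.arange(0, head_dim, 2).float() / head_dim))
freqs = torch.outer(torch.arange(seq).float(), inv_freq)   # [seq, head_dim // 2]
emb = torch.repeat_interleave(freqs, 2, dim=-1)            # [seq, head_dim]
cos, sin = emb.cos()[None, None], emb.sin()[None, None]

q_rot = q * cos + rotate_half(q) * sin
# Each 2-d pair is rotated by an orthogonal matrix, so vector norms are unchanged.
assert torch.allclose(q.norm(dim=-1), q_rot.norm(dim=-1), atol=1e-5)
```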
attn_output = attn_output.reshape(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + +# Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2 Llama->Cohere +class CohereFlashAttention2(CohereAttention): + """ + Cohere flash attention module. This module inherits from `CohereAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). + self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + output_attentions = False + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + if self.use_qk_norm: + query_states = self.q_norm(query_states) + key_states = self.k_norm(key_states) + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + past_key_value = getattr(self, "past_key_value", past_key_value) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; position_ids needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache + # to be able to avoid many of these transpose/reshape/view. + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.transpose(1, 2) + + dropout_rate = self.attention_dropout if self.training else 0.0 + + # Ignore copy + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. 
Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (CohereLayerNorm handles it correctly) + + input_dtype = query_states.dtype + if input_dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query_states = query_states.to(target_dtype) + key_states = key_states.to(target_dtype) + value_states = value_states.to(target_dtype) + + attn_output = self._flash_attention_forward( + query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate + ) + + attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous() + attn_output = self.o_proj(attn_output) + + if not output_attentions: + attn_weights = None + + return attn_output, attn_weights, past_key_value + + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. + + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in CohereFlashAttention2 __init__. 
+ causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. + attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaSdpaAttention Llama->Cohere +class CohereSdpaAttention(CohereAttention): + """ + Cohere attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from + `CohereAttention` as the weights of the module stays untouched. The only changes are on the forward pass to adapt to + SDPA API. + """ + + # Ignore copy + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Cache] = None, + output_attentions: bool = False, + use_cache: bool = False, + cache_position: Optional[torch.LongTensor] = None, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: + if output_attentions: + # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented. + logger.warning_once( + "CohereModel is using CohereSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. 
Falling back to the manual attention implementation, " + 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' + ) + return super().forward( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + bsz, q_len, _ = hidden_states.size() + + query_states = self.q_proj(hidden_states) + key_states = self.k_proj(hidden_states) + value_states = self.v_proj(hidden_states) + + query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim) + key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim) + if self.use_qk_norm: + query_states = self.q_norm(query_states) + key_states = self.k_norm(key_states) + + query_states = query_states.transpose(1, 2) + key_states = key_states.transpose(1, 2) + value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2) + + cos, sin = self.rotary_emb(value_states, position_ids) + query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) + + # In case static cache is used, it is an instance attribute. + past_key_value = getattr(self, "past_key_value", past_key_value) + + if past_key_value is not None: + # sin and cos are specific to RoPE models; cache_position needed for the static cache + cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position} + key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs) + + key_states = repeat_kv(key_states, self.num_key_value_groups) + value_states = repeat_kv(value_states, self.num_key_value_groups) + + causal_mask = attention_mask + # if attention_mask is not None and cache_position is not None: + if attention_mask is not None: + causal_mask = causal_mask[:, :, :, : key_states.shape[-2]] + + # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask, + # Reference: https://github.com/pytorch/pytorch/issues/112577. + if query_states.device.type == "cuda" and causal_mask is not None: + query_states = query_states.contiguous() + key_states = key_states.contiguous() + value_states = value_states.contiguous() + + # In case we are not compiling, we may set `causal_mask` to None, which is required to dispatch to SDPA's Flash Attention 2 backend, rather + # relying on the `is_causal` argument. 
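Note that the eager and SDPA paths both expand the grouped key/value heads with the `repeat_kv` helper defined earlier, whereas the flash-attention path above leaves them unexpanded and lets the kernel handle grouping. A tiny sanity check of what that expansion does, with made-up sizes:

```python
# Toy check of repeat_kv (sizes are arbitrary): each key/value head is repeated
# n_rep times along the head axis, matching torch.repeat_interleave on dim=1.
import torch

batch, kv_heads, seq, head_dim, n_rep = 1, 2, 3, 4, 3
kv = torch.randn(batch, kv_heads, seq, head_dim)

expanded = kv[:, :, None, :, :].expand(batch, kv_heads, n_rep, seq, head_dim)
expanded = expanded.reshape(batch, kv_heads * n_rep, seq, head_dim)

assert torch.equal(expanded, torch.repeat_interleave(kv, n_rep, dim=1))
print(expanded.shape)  # torch.Size([1, 6, 3, 4])
```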
+ attn_output = torch.nn.functional.scaled_dot_product_attention( + query_states, + key_states, + value_states, + attn_mask=causal_mask, + dropout_p=self.attention_dropout if self.training else 0.0, + is_causal=causal_mask is None and q_len > 1, + ) + + attn_output = attn_output.transpose(1, 2).contiguous() + attn_output = attn_output.view(bsz, q_len, self.hidden_size) + + attn_output = self.o_proj(attn_output) + + return attn_output, None, past_key_value + + +COHERE_ATTENTION_CLASSES = { + "eager": CohereAttention, + "flash_attention_2": CohereFlashAttention2, + "sdpa": CohereSdpaAttention, +} + + +class CohereDecoderLayer(nn.Module): + def __init__(self, config: CohereConfig, layer_idx: int): + super().__init__() + self.hidden_size = config.hidden_size + + self.self_attn = COHERE_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx) + + self.mlp = CohereMLP(config) + self.input_layernorm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_value: Optional[Tuple[torch.Tensor]] = None, + output_attentions: Optional[bool] = False, + use_cache: Optional[bool] = False, + cache_position: Optional[torch.LongTensor] = None, + **kwargs, + ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]: + """ + Args: + hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` + attention_mask (`torch.FloatTensor`, *optional*): + attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1, + query_sequence_length, key_sequence_length)` if default attention is used. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under + returned tensors for more detail. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding + (see `past_key_values`). + past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states + """ + if "padding_mask" in kwargs: + warnings.warn( + "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`" + ) + + residual = hidden_states + + hidden_states = self.input_layernorm(hidden_states) + + # Self Attention + hidden_states_attention, self_attn_weights, present_key_value = self.self_attn( + hidden_states=hidden_states, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_value=past_key_value, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + **kwargs, + ) + + # Fully Connected + hidden_states_mlp = self.mlp(hidden_states) + + # Add everything together + hidden_states = residual + hidden_states_attention + hidden_states_mlp + + outputs = (hidden_states,) + + if output_attentions: + outputs += (self_attn_weights,) + + if use_cache: + outputs += (present_key_value,) + + return outputs + + +COHERE_START_DOCSTRING = r""" + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) 
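The `CohereDecoderLayer` above differs from the usual sequential transformer block: a single `input_layernorm` output feeds both the attention and the MLP branches, and both branch outputs are added to the residual in one step. The stand-in module below is a minimal sketch of that dataflow only, not the real layer; plain `nn.Linear` layers replace the attention and MLP sub-modules.

```python
# Sketch (not the real module): the parallel-residual dataflow of CohereDecoderLayer.
import torch
import torch.nn as nn

class ParallelBlockSketch(nn.Module):
    def __init__(self, hidden_size=64):
        super().__init__()
        self.norm = nn.LayerNorm(hidden_size)             # stand-in for CohereLayerNorm
        self.attn = nn.Linear(hidden_size, hidden_size)   # stand-in for self-attention
        self.mlp = nn.Linear(hidden_size, hidden_size)    # stand-in for CohereMLP

    def forward(self, x):
        residual = x
        h = self.norm(x)                                  # one shared pre-norm
        return residual + self.attn(h) + self.mlp(h)      # both branches summed at once

x = torch.randn(2, 5, 64)
print(ParallelBlockSketch()(x).shape)  # torch.Size([2, 5, 64])
```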
+ + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`CohereConfig`]): + Model configuration class with all the parameters of the model. Initializing with a config file does not + load the weights associated with the model, only the configuration. Check out the + [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + + +@add_start_docstrings( + "The bare Cohere Model outputting raw hidden-states without any specific head on top.", + COHERE_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaPreTrainedModel with Llama->Cohere +class CoherePreTrainedModel(PreTrainedModel): + config_class = CohereConfig + base_model_prefix = "model" + supports_gradient_checkpointing = True + _no_split_modules = ["CohereDecoderLayer"] + _skip_keys_device_placement = ["past_key_values"] + _supports_flash_attn_2 = True + _supports_sdpa = True + _supports_cache_class = True + + def _init_weights(self, module): + std = self.config.initializer_range + if isinstance(module, nn.Linear): + module.weight.data.normal_(mean=0.0, std=std) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=std) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + + def _setup_cache(self, cache_cls, max_batch_size, max_cache_len: Optional[int] = None): + if self.config._attn_implementation == "flash_attention_2" and cache_cls == StaticCache: + raise ValueError( + "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2` " + "make sure to use `sdpa` in the mean time, and open an issue at https://github.com/huggingface/transformers" + ) + + for layer in self.model.layers: + device = layer.input_layernorm.weight.device + if hasattr(self.config, "_pre_quantization_dtype"): + dtype = self.config._pre_quantization_dtype + else: + dtype = layer.self_attn.o_proj.weight.dtype + layer.self_attn.past_key_value = cache_cls( + self.config, max_batch_size, max_cache_len, device=device, dtype=dtype + ) + + def _reset_cache(self): + for layer in self.model.layers: + layer.self_attn.past_key_value = None + + +COHERE_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): + Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide + it. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + If `past_key_values` is used, optionally only the last `input_ids` have to be input (see + `past_key_values`). + + If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`] + and modify to your needs. 
See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more + information on the default strategy. + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.n_positions - 1]`. + + [What are position IDs?](../glossary#position-ids) + past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*): + Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention + blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values` + returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`. + + Two formats are allowed: + - a [`~cache_utils.Cache`] instance; + - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of + shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy + cache format. + + The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the + legacy cache format will be returned. + + If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't + have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids` + of shape `(batch_size, sequence_length)`. + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare Cohere Model outputting raw hidden-states without any specific head on top.", + COHERE_START_DOCSTRING, +) +# Copied from transformers.models.llama.modeling_llama.LlamaModel with Llama->Cohere +class CohereModel(CoherePreTrainedModel): + """ + Transformer decoder consisting of *config.num_hidden_layers* layers. 
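As a hypothetical smoke test for the `CohereModel` defined here (assuming a transformers build that ships the Cohere architecture), a tiny made-up config with random weights is enough to exercise one forward pass on CPU; none of the sizes below come from a real checkpoint.

```python
# Hypothetical smoke test: tiny invented config, random weights, one forward pass.
import torch
from transformers import CohereConfig, CohereModel

config = CohereConfig(
    vocab_size=128, hidden_size=64, intermediate_size=128,
    num_hidden_layers=2, num_attention_heads=4, num_key_value_heads=4,
    max_position_embeddings=64,
)
model = CohereModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    hidden = model(input_ids=input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([1, 8, 64])
```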
Each layer is a [`CohereDecoderLayer`] + + Args: + config: CohereConfig + """ + + # Ignore copy + def __init__(self, config: CohereConfig): + super().__init__(config) + self.padding_idx = config.pad_token_id + self.vocab_size = config.vocab_size + + self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx) + self.layers = nn.ModuleList( + [CohereDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)] + ) + self.norm = CohereLayerNorm(hidden_size=(config.hidden_size), eps=config.layer_norm_eps) + self.gradient_checkpointing = False + + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embed_tokens + + def set_input_embeddings(self, value): + self.embed_tokens = value + + # Ignore copy + @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, BaseModelOutputWithPast]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if (input_ids is None) ^ (inputs_embeds is not None): + raise ValueError( + "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one" + ) + + if self.gradient_checkpointing and self.training and use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`." 
+ ) + use_cache = False + + if inputs_embeds is None: + inputs_embeds = self.embed_tokens(input_ids) + + past_seen_tokens = 0 + if use_cache: # kept for BC (cache positions) + if not isinstance(past_key_values, StaticCache): + past_key_values = DynamicCache.from_legacy_cache(past_key_values) + past_seen_tokens = past_key_values.get_seq_length() + + if cache_position is None: + if isinstance(past_key_values, StaticCache): + raise ValueError("cache_position is a required argument when using StaticCache.") + cache_position = torch.arange( + past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device + ) + + if position_ids is None: + position_ids = cache_position.unsqueeze(0) + + causal_mask = self._update_causal_mask(attention_mask, inputs_embeds, cache_position, past_seen_tokens) + + # embed positions + hidden_states = inputs_embeds + + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + next_decoder_cache = None + + for decoder_layer in self.layers: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + decoder_layer.__call__, + hidden_states, + causal_mask, + position_ids, + past_key_values, + output_attentions, + use_cache, + cache_position, + ) + else: + layer_outputs = decoder_layer( + hidden_states, + attention_mask=causal_mask, + position_ids=position_ids, + past_key_value=past_key_values, + output_attentions=output_attentions, + use_cache=use_cache, + cache_position=cache_position, + ) + + hidden_states = layer_outputs[0] + + if use_cache: + next_decoder_cache = layer_outputs[2 if output_attentions else 1] + + if output_attentions: + all_self_attns += (layer_outputs[1],) + + hidden_states = self.norm(hidden_states) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + next_cache = None + if use_cache: + next_cache = ( + next_decoder_cache.to_legacy_cache() if isinstance(next_decoder_cache, Cache) else next_decoder_cache + ) + if not return_dict: + return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None) + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=next_cache, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + def _update_causal_mask( + self, + attention_mask: torch.Tensor, + input_tensor: torch.Tensor, + cache_position: torch.Tensor, + past_seen_tokens: int, + ): + # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static + # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes. + # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using + # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114 + + if self.config._attn_implementation == "flash_attention_2": + if attention_mask is not None and 0.0 in attention_mask: + return attention_mask + return None + + if self.config._attn_implementation == "sdpa": + # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, + # in order to dispatch on Flash Attention 2. 
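The cache bookkeeping in the forward pass above boils down to asking the cache how many tokens it already holds and then enumerating the absolute positions of the new tokens. A toy illustration of that arithmetic (sizes invented):

```python
# Toy illustration of the cache_position/position_ids bookkeeping above.
import torch

past_seen_tokens, new_tokens = 6, 2            # e.g. 6 cached tokens, 2 new ones
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + new_tokens)
position_ids = cache_position.unsqueeze(0)     # default when position_ids is None
print(cache_position.tolist(), position_ids.shape)  # [6, 7] torch.Size([1, 2])
```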
+ if AttentionMaskConverter._ignore_causal_mask_sdpa( + attention_mask, inputs_embeds=input_tensor, past_key_values_length=past_seen_tokens + ): + return None + + dtype, device = input_tensor.dtype, input_tensor.device + min_dtype = torch.finfo(dtype).min + sequence_length = input_tensor.shape[1] + if hasattr(getattr(self.layers[0], "self_attn", {}), "past_key_value"): # static cache + target_length = self.config.max_position_embeddings + else: # dynamic cache + target_length = ( + attention_mask.shape[-1] + if isinstance(attention_mask, torch.Tensor) + else past_seen_tokens + sequence_length + 1 + ) + + causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device) + if sequence_length != 1: + causal_mask = torch.triu(causal_mask, diagonal=1) + causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1) + causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1) + if attention_mask is not None: + causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit + if attention_mask.dim() == 2: + mask_length = attention_mask.shape[-1] + padding_mask = causal_mask[..., :mask_length].eq(0.0) * attention_mask[:, None, None, :].eq(0.0) + causal_mask[..., :mask_length] = causal_mask[..., :mask_length].masked_fill(padding_mask, min_dtype) + elif attention_mask.dim() == 4: + # backwards compatibility: we allow passing a 4D attention mask shorter than the input length with + # cache. In that case, the 4D attention mask attends to the newest tokens only. + if attention_mask.shape[-2] < cache_position[0] + sequence_length: + offset = cache_position[0] + else: + offset = 0 + mask_shape = attention_mask.shape + mask_slice = (attention_mask.eq(0.0)).to(dtype=dtype) * min_dtype + causal_mask[ + : mask_shape[0], : mask_shape[1], offset : mask_shape[2] + offset, : mask_shape[3] + ] = mask_slice + + if ( + self.config._attn_implementation == "sdpa" + and attention_mask is not None + and attention_mask.device.type == "cuda" + ): + # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when + # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path. 
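The mask recipe in `_update_causal_mask` above can be reproduced in isolation. The toy snippet below (arbitrary small sizes) shows how the all-`min_dtype` matrix, the `triu` call, and the `cache_position` comparison combine so that each new query attends to its own position and everything before it.

```python
# Toy reconstruction of the causal-mask construction above (sizes are arbitrary).
import torch

dtype = torch.float32
min_dtype = torch.finfo(dtype).min
past_seen_tokens, sequence_length = 2, 3
target_length = past_seen_tokens + sequence_length
cache_position = torch.arange(past_seen_tokens, past_seen_tokens + sequence_length)

causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
causal_mask = torch.triu(causal_mask, diagonal=1)
causal_mask *= torch.arange(target_length) > cache_position.reshape(-1, 1)

print(causal_mask == 0)   # True where attention is allowed
# tensor([[ True,  True,  True, False, False],
#         [ True,  True,  True,  True, False],
#         [ True,  True,  True,  True,  True]])
```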
+ # Details: https://github.com/pytorch/pytorch/issues/110213 + causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype) + + return causal_mask + + +# Copied from transformers.models.llama.modeling_llama.LlamaForCausalLM with Llama->Cohere +class CohereForCausalLM(CoherePreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + # Ignore copy + def __init__(self, config): + super().__init__(config) + self.model = CohereModel(config) + self.vocab_size = config.vocab_size + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + self.logit_scale = config.logit_scale + self.tie_word_embeddings = config.tie_word_embeddings + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.model.embed_tokens + + def set_input_embeddings(self, value): + self.model.embed_tokens = value + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def set_decoder(self, decoder): + self.model = decoder + + def get_decoder(self): + return self.model + + # Ignore copy + @add_start_docstrings_to_model_forward(COHERE_INPUTS_DOCSTRING) + @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC) + def forward( + self, + input_ids: torch.LongTensor = None, + attention_mask: Optional[torch.Tensor] = None, + position_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[List[torch.FloatTensor]] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + cache_position: Optional[torch.LongTensor] = None, + ) -> Union[Tuple, CausalLMOutputWithPast]: + r""" + Args: + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., + config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored + (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. + + Returns: + + Example: + + ```python + >> from transformers import AutoTokenizer, CohereForCausalLM + + >> model = CohereForCausalLM.from_pretrained("CohereForAI/c4ai-command-r-v01") + >> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") + + >> prompt = "Hey, are you conscious? Can you talk to me?" + >> inputs = tokenizer(prompt, return_tensors="pt") + + >> # Generate + >> generate_ids = model.generate(inputs.input_ids, max_length=30) + >> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] + "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you." 
+ ```""" + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + position_ids=position_ids, + past_key_values=past_key_values, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + cache_position=cache_position, + ) + + hidden_states = outputs[0] + logits = self.lm_head(hidden_states) + logits = logits * self.logit_scale + logits = logits.float() + + loss = None + if labels is not None: + # Shift so that tokens < n predict n + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + shift_logits = shift_logits.view(-1, self.config.vocab_size) + shift_labels = shift_labels.view(-1) + # Enable model parallelism + shift_labels = shift_labels.to(shift_logits.device) + loss = loss_fct(shift_logits, shift_labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return (loss,) + output if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=logits, + past_key_values=outputs.past_key_values, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def prepare_inputs_for_generation( + self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, cache_position=None, **kwargs + ): + # With static cache, the `past_key_values` is None + # TODO joao: standardize interface for the different Cache classes and remove of this if + has_static_cache = False + if past_key_values is None: + past_key_values = getattr(getattr(self.model.layers[0], "self_attn", {}), "past_key_value", None) + has_static_cache = past_key_values is not None + + past_length = 0 + if past_key_values is not None: + if isinstance(past_key_values, Cache): + past_length = cache_position[0] if cache_position is not None else past_key_values.get_seq_length() + max_cache_length = ( + torch.tensor(past_key_values.get_max_length(), device=input_ids.device) + if past_key_values.get_max_length() is not None + else None + ) + cache_length = past_length if max_cache_length is None else torch.min(max_cache_length, past_length) + # TODO joao: remove this `else` after `generate` prioritizes `Cache` objects + else: + cache_length = past_length = past_key_values[0][0].shape[2] + max_cache_length = None + + # Keep only the unprocessed tokens: + # 1 - If the length of the attention_mask exceeds the length of input_ids, then we are in a setting where + # some of the inputs are exclusively passed as part of the cache (e.g. when passing input_embeds as + # input) + if attention_mask is not None and attention_mask.shape[1] > input_ids.shape[1]: + input_ids = input_ids[:, -(attention_mask.shape[1] - past_length) :] + # 2 - If the past_length is smaller than input_ids', then input_ids holds all input tokens. We can discard + # input_ids based on the past_length. + elif past_length < input_ids.shape[1]: + input_ids = input_ids[:, past_length:] + # 3 - Otherwise (past_length >= input_ids.shape[1]), let's assume input_ids only has unprocessed tokens. 
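Two details of the loss branch above are easy to skim past: the logits are multiplied by `config.logit_scale` before anything else, and the usual next-token shift drops the last logit and the first label. A toy reproduction with random tensors (the vocab size and logit_scale values are made up):

```python
# Toy reproduction of the logit scaling + shifted cross-entropy above.
import torch
from torch.nn import CrossEntropyLoss

vocab_size, logit_scale = 11, 0.0625                 # illustrative values only
logits = torch.randn(2, 5, vocab_size) * logit_scale  # stands in for lm_head(...) * logit_scale
labels = torch.randint(0, vocab_size, (2, 5))

shift_logits = logits[..., :-1, :].contiguous()       # position i predicts token i+1
shift_labels = labels[..., 1:].contiguous()
loss = CrossEntropyLoss()(shift_logits.view(-1, vocab_size), shift_labels.view(-1))
print(loss.item())
```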
+ + # If we are about to go beyond the maximum cache length, we need to crop the input attention mask. + if ( + max_cache_length is not None + and attention_mask is not None + and cache_length + input_ids.shape[1] > max_cache_length + ): + attention_mask = attention_mask[:, -max_cache_length:] + + position_ids = kwargs.get("position_ids", None) + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + # The `contiguous()` here is necessary to have a static stride during decoding. torchdynamo otherwise + # recompiles graphs as the stride of the inputs is a guard. Ref: https://github.com/huggingface/transformers/pull/29114 + # TODO: use `next_tokens` directly instead. + model_inputs = {"input_ids": input_ids.contiguous()} + + input_length = position_ids.shape[-1] if position_ids is not None else input_ids.shape[-1] + if cache_position is None: + cache_position = torch.arange(past_length, past_length + input_length, device=input_ids.device) + else: + cache_position = cache_position[-input_length:] + + if has_static_cache: + past_key_values = None + + model_inputs.update( + { + "position_ids": position_ids, + "cache_position": cache_position, + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "attention_mask": attention_mask, + } + ) + return model_inputs + + @staticmethod + def _reorder_cache(past_key_values, beam_idx): + reordered_past = () + for layer_past in past_key_values: + reordered_past += ( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), + ) + return reordered_past diff --git a/venv/lib/python3.10/site-packages/transformers/models/cohere/tokenization_cohere_fast.py b/venv/lib/python3.10/site-packages/transformers/models/cohere/tokenization_cohere_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..e733a6dfd09541c9e536e377fd0846e3e8da63fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/cohere/tokenization_cohere_fast.py @@ -0,0 +1,701 @@ +# coding=utf-8 +# Copyright 2024 Cohere team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
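Stepping back to the generation helper that closed out the modeling file above: when only an attention mask is available, `position_ids` are rebuilt on the fly with a cumulative sum, and padding positions are filled with a dummy value of 1. A quick standalone check with a left-padded toy batch:

```python
# Standalone check of the position_ids-on-the-fly trick used in
# prepare_inputs_for_generation above (toy left-padded batch).
import torch

attention_mask = torch.tensor([[0, 0, 1, 1, 1],   # left-padded row
                               [1, 1, 1, 1, 1]])
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
print(position_ids)
# tensor([[1, 1, 0, 1, 2],
#         [0, 1, 2, 3, 4]])
```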
+ +# This file is based on the tokenization_llama_fast.py file in transformers + +import pickle +from typing import Dict, List, Literal, Union + +from tokenizers import processors + +from ...pipelines.conversational import Conversation +from ...tokenization_utils_base import BatchEncoding +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from ...utils.versions import require_version + + +require_version("tokenizers>=0.13.3") + +logger = logging.get_logger(__name__) +VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"} + +PRETRAINED_VOCAB_FILES_MAP = { + "tokenizer_file": { + "Cohere/Command-nightly": "https://huggingface.co/Cohere/Command-nightly/blob/main/tokenizer.json", + }, +} + +# fmt: off +DEFAULT_SYSTEM_PROMPT = "You are Command-R, a brilliant, sophisticated, AI-assistant trained to assist human users by providing thorough responses. You are trained by Cohere." +DEFAULT_RAG_PREAMBLE = """## Task and Context +You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. + +## Style Guide +Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.""" +# fmt: on + + +class CohereTokenizerFast(PreTrainedTokenizerFast): + """ + Construct a Cohere tokenizer. Based on byte-level Byte-Pair-Encoding. + + This uses notably ByteFallback and NFC normalization. + + ```python + >>> from transformers import AutoTokenizer + + >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") + >>> tokenizer.encode("Hello this is a test") + [5, 28339, 2075, 1801, 1671, 3282] + ``` + + If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or + call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the + values of the first token and final token of an encoded sequence will not be correct). For more details, checkout + [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation. + + You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since + the model was not pretrained this way, it might yield a decrease in performance. + + + + When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`. + + + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`, *optional*): + Path to the vocabulary file. + merges_file (`str`, *optional*): + Path to the merges file. + tokenizer_file (`str`, *optional*): + [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that + contains everything needed to load the tokenizer. + clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`): + Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like + extra spaces. + unk_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The unknown token. 
A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + bos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `""`): + The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. + eos_token (`str` or `tokenizers.AddedToken`, *optional*, defaults to `"<|END_OF_TURN_TOKEN|>"`): + The end of sequence token. + add_bos_token (`bool`, *optional*, defaults to `True`): + Whether or not to add an `bos_token` at the start of sequences. + add_eos_token (`bool`, *optional*, defaults to `False`): + Whether or not to add an `eos_token` at the end of sequences. + use_default_system_prompt (`bool`, *optional*, defaults to `False`): + Whether or not the default system prompt for Cohere tokenizer should be used. + add_prefix_space (`bool`, *optional*, defaults to `False`): + Whether or not the tokenizer should automatically add a prefix space + """ + + vocab_files_names = VOCAB_FILES_NAMES + pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP + padding_side = "left" + model_input_names = ["input_ids", "attention_mask"] + slow_tokenizer_class = None + # No `max_model_input_sizes` + + def __init__( + self, + vocab_file=None, + merges_file=None, + tokenizer_file=None, + clean_up_tokenization_spaces=False, + unk_token="", + bos_token="", + eos_token="<|END_OF_TURN_TOKEN|>", + add_bos_token=True, + add_eos_token=False, + use_default_system_prompt=False, + add_prefix_space=False, + **kwargs, + ): + super().__init__( + vocab_file=vocab_file, + merges_file=merges_file, + tokenizer_file=tokenizer_file, + clean_up_tokenization_spaces=clean_up_tokenization_spaces, + unk_token=unk_token, + bos_token=bos_token, + eos_token=eos_token, + add_bos_token=add_bos_token, + add_eos_token=add_eos_token, + use_default_system_prompt=use_default_system_prompt, + add_prefix_space=add_prefix_space, + **kwargs, + ) + self._add_bos_token = add_bos_token + self._add_eos_token = add_eos_token + self.update_post_processor() + self.use_default_system_prompt = use_default_system_prompt + self.vocab_file = vocab_file + self.grounded_generation_template = kwargs.pop("grounded_generation_template", None) + self.tool_use_template = kwargs.pop("tool_use_template", None) + + # TODO @ArthurZucker this can only work one way for now, to update later-on. Tests should also properly + # check this as they were green before. + pre_tok_state = pickle.dumps(self.backend_tokenizer.pre_tokenizer) + decoder_state = pickle.dumps(self.backend_tokenizer.decoder) + + if add_prefix_space: + pre_tok_state = pre_tok_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') + decoder_state = decoder_state.replace(b'"add_prefix_space":false', b'"add_prefix_space": true') + self.backend_tokenizer.pre_tokenizer = pickle.loads(pre_tok_state) + self.backend_tokenizer.decoder = pickle.loads(decoder_state) + + self.add_prefix_space = add_prefix_space + + def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + if not (self.add_prefix_space or not is_split_into_words): + raise Exception( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with" + " pretokenized inputs." 
+ ) + + return super()._batch_encode_plus(*args, **kwargs) + + def _encode_plus(self, *args, **kwargs) -> BatchEncoding: + is_split_into_words = kwargs.get("is_split_into_words", False) + + if not (self.add_prefix_space or not is_split_into_words): + raise Exception( + f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with" + " pretokenized inputs." + ) + + return super()._encode_plus(*args, **kwargs) + + def update_post_processor(self): + """ + Updates the underlying post processor with the current `bos_token` and `eos_token`. + """ + bos = self.bos_token + bos_token_id = self.bos_token_id + if bos is None and self.add_bos_token: + raise ValueError("add_bos_token = True but bos_token = None") + + eos = self.eos_token + eos_token_id = self.eos_token_id + if eos is None and self.add_eos_token: + raise ValueError("add_eos_token = True but eos_token = None") + + single = f"{(bos+':0 ') if self.add_bos_token else ''}$A:0{(' '+eos+':0') if self.add_eos_token else ''}" + pair = f"{single}{(' '+bos+':1') if self.add_bos_token else ''} $B:1{(' '+eos+':1') if self.add_eos_token else ''}" + + special_tokens = [] + if self.add_bos_token: + special_tokens.append((bos, bos_token_id)) + if self.add_eos_token: + special_tokens.append((eos, eos_token_id)) + self._tokenizer.post_processor = processors.TemplateProcessing( + single=single, pair=pair, special_tokens=special_tokens + ) + + @property + def add_eos_token(self): + return self._add_eos_token + + @property + def add_bos_token(self): + return self._add_bos_token + + @add_eos_token.setter + def add_eos_token(self, value): + self._add_eos_token = value + self.update_post_processor() + + @add_bos_token.setter + def add_bos_token(self, value): + self._add_bos_token = value + self.update_post_processor() + + @property + def default_chat_template(self): + """ + Cohere Tokenizer uses <|START_OF_TURN_TOKEN|> and <|END_OF_TURN_TOKEN|> to indicate each turn in a chat. + Additioanlly, to indicate the source of the message, <|USER_TOKEN|>, <|CHATBOT_TOKEN|> and <|SYSTEM_TOKEN|> + for user, assitant and system messages respectively. + + The output should look something like: + <|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>{{ preamble }}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>{{ How are you? }}<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>{{ I am doing well! }}<|END_OF_TURN_TOKEN|> + + Use add_generation_prompt to add a prompt for the model to generate a response: + >>> from transformers import AutoTokenizer + >>> tokenizer = AutoTokenizer.from_pretrained("CohereForAI/c4ai-command-r-v01") + >>> messages = [{"role": "user", "content": "Hello, how are you?"}] + >>> tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) + '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + + """ + logger.warning_once( + "\nNo chat template is defined for this tokenizer - using the default template " + f"for the {self.__class__.__name__} class. If the default is not appropriate for " + "your model, please set `tokenizer.chat_template` to an appropriate template. 
" + "See https://huggingface.co/docs/transformers/main/chat_templating for more information.\n" + ) + default_template = ( + "{{ bos_token }}" + "{% if messages[0]['role'] == 'system' %}" + "{% set loop_messages = messages[1:] %}" # Extract system message if it's present + "{% set system_message = messages[0]['content'] %}" + "{% elif USE_DEFAULT_PROMPT == true %}" + "{% set loop_messages = messages %}" # Or use the default system message if the flag is set + "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}" + "{% else %}" + "{% set loop_messages = messages %}" + "{% set system_message = false %}" + "{% endif %}" + "{% if system_message != false %}" # Start with system message + "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + system_message + '<|END_OF_TURN_TOKEN|>' }}" + "{% endif %}" + "{% for message in loop_messages %}" # Loop over all non-system messages + "{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}" + "{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}" + "{% endif %}" + "{% set content = message['content'] %}" + "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way + "{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% elif message['role'] == 'assistant' %}" + "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% endif %}" + "{% endfor %}" + "{% if add_generation_prompt %}" + "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}" + "{% endif %}" + ) + default_template = default_template.replace( + "USE_DEFAULT_PROMPT", "true" if self.use_default_system_prompt else "false" + ) + default_message = DEFAULT_SYSTEM_PROMPT.replace("\n", "\\n").replace("'", "\\'") + default_template = default_template.replace("DEFAULT_SYSTEM_MESSAGE", default_message) + + tool_use_template = ( + "{{ bos_token }}" + "{% if messages[0]['role'] == 'system' %}" + "{% set loop_messages = messages[1:] %}" # Extract system message if it's present + "{% set system_message = messages[0]['content'] %}" + "{% else %}" + "{% set loop_messages = messages %}" + "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}" + "{% endif %}" + "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}" + "{{ '# Safety Preamble' }}" + "{{ '\nThe instructions in this section override those in the task description and style guide sections. Don\\'t answer questions that are harmful or immoral.' }}" + "{{ '\n\n# System Preamble' }}" + "{{ '\n## Basic Rules' }}" + "{{ '\nYou are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\\'s requests, you cite your sources in your answers, according to those instructions.' 
}}" + "{{ '\n\n# User Preamble' }}" + "{{ '\n' + system_message }}" + "{{'\n\n## Available Tools\nHere is a list of tools that you have available to you:\n\n'}}" + "{% for tool in tools %}" + "{% if loop.index0 != 0 %}" + "{{ '\n\n'}}" + "{% endif %}" + "{{'```python\ndef ' + tool.name + '('}}" + "{% for param_name, param_fields in tool.parameter_definitions.items() %}" + "{% if loop.index0 != 0 %}" + "{{ ', '}}" + "{% endif %}" + "{{param_name}}: " + "{% if not param_fields.required %}" + "{{'Optional[' + param_fields.type + '] = None'}}" + "{% else %}" + "{{ param_fields.type }}" + "{% endif %}" + "{% endfor %}" + '{{ \') -> List[Dict]:\n """\'}}' + "{{ tool.description }}" + "{% if tool.parameter_definitions|length != 0 %}" + "{{ '\n\n Args:\n '}}" + "{% for param_name, param_fields in tool.parameter_definitions.items() %}" + "{% if loop.index0 != 0 %}" + "{{ '\n ' }}" + "{% endif %}" + "{{ param_name + ' ('}}" + "{% if not param_fields.required %}" + "{{'Optional[' + param_fields.type + ']'}}" + "{% else %}" + "{{ param_fields.type }}" + "{% endif %}" + "{{ '): ' + param_fields.description }}" + "{% endfor %}" + "{% endif %}" + '{{ \'\n """\n pass\n```\' }}' + "{% endfor %}" + "{{ '<|END_OF_TURN_TOKEN|>'}}" + "{% for message in loop_messages %}" + "{% set content = message['content'] %}" + "{% if message['role'] == 'user' %}" + "{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% elif message['role'] == 'system' %}" + "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% elif message['role'] == 'assistant' %}" + "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% endif %}" + "{% endfor %}" + "{{'<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write \\'Action:\\' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user\\'s last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. The list of actions you want to call should be formatted as a list of json objects, for example:\n```json\n[\n {\n \"tool_name\": title of the tool in the specification,\n \"parameters\": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters\n }\n]```<|END_OF_TURN_TOKEN|>'}}" + "{% if add_generation_prompt %}" + "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}" + "{% endif %}" + ) + default_tool_message = DEFAULT_RAG_PREAMBLE.replace("\n", "\\n").replace("'", "\\'") + tool_use_template = tool_use_template.replace("DEFAULT_SYSTEM_MESSAGE", default_tool_message) + + rag_template = ( + "{{ bos_token }}" + "{% if messages[0]['role'] == 'system' %}" + "{% set loop_messages = messages[1:] %}" # Extract system message if it's present + "{% set system_message = messages[0]['content'] %}" + "{% else %}" + "{% set loop_messages = messages %}" + "{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}" + "{% endif %}" + "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}" + "{{ '# Safety Preamble' }}" + "{{ '\nThe instructions in this section override those in the task description and style guide sections. Don\\'t answer questions that are harmful or immoral.' }}" + "{{ '\n\n# System Preamble' }}" + "{{ '\n## Basic Rules' }}" + "{{ '\nYou are a powerful conversational AI trained by Cohere to help people. 
You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user\\'s requests, you cite your sources in your answers, according to those instructions.' }}" + "{{ '\n\n# User Preamble' }}" + "{{ '\n' + system_message }}" + "{{ '<|END_OF_TURN_TOKEN|>'}}" + "{% for message in loop_messages %}" # Loop over all non-system messages + "{% set content = message['content'] %}" + "{% if message['role'] == 'user' %}" # After all of that, handle messages/roles in a fairly normal way + "{{ '<|START_OF_TURN_TOKEN|><|USER_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% elif message['role'] == 'system' %}" + "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% elif message['role'] == 'assistant' %}" + "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' + content.strip() + '<|END_OF_TURN_TOKEN|>' }}" + "{% endif %}" + "{% endfor %}" + "{{ '<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>'}}" + "{{ '' }}" + "{% for document in documents %}" # Loop over all non-system messages + "{{ '\nDocument: ' }}" + "{{ loop.index0 }}\n" + "{% for key, value in document.items() %}" + "{{ key }}: {{value}}\n" + "{% endfor %}" + "{% endfor %}" + "{{ ''}}" + "{{ '<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>' }}" + "{{ 'Carefully perform the following instructions, in order, starting each with a new line.\n' }}" + "{{ 'Firstly, Decide which of the retrieved documents are relevant to the user\\'s last input by writing \\'Relevant Documents:\\' followed by comma-separated list of document numbers. If none are relevant, you should instead write \\'None\\'.\n' }}" + "{{ 'Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user\\'s last input by writing \\'Cited Documents:\\' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write \\'None\\'.\n' }}" + "{% if citation_mode=='accurate' %}" + "{{ 'Thirdly, Write \\'Answer:\\' followed by a response to the user\\'s last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup.\n' }}" + "{% endif %}" + "{{ 'Finally, Write \\'Grounded answer:\\' followed by a response to the user\\'s last input in high quality natural english. Use the symbols and to indicate when a fact comes from a document in the search result, e.g my fact for a fact from document 0.' }}" + "{{ '<|END_OF_TURN_TOKEN|>' }}" + "{% if add_generation_prompt %}" + "{{ '<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>' }}" + "{% endif %}" + ) + default_rag_message = DEFAULT_RAG_PREAMBLE.replace("\n", "\\n").replace("'", "\\'") + rag_template = rag_template.replace("DEFAULT_SYSTEM_MESSAGE", default_rag_message) + + return {"default": default_template, "tool_use": tool_use_template, "rag": rag_template} + + def apply_tool_use_template( + self, + conversation: Union[List[Dict[str, str]], "Conversation"], + tools: List[Dict], + **kwargs, + ) -> Union[str, List[int]]: + """Create a Command-R tool-use prompt. + + Once rendered, the prompt instructs the model to generate a list of actions to perform on a set of user supplied tools + to help carry out the user's requests. 
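As an aside on the Jinja `tool_use` template above: the loop over `parameter_definitions` renders each tool as a Python-style function stub, wrapping non-required parameters in `Optional[...] = None`. The snippet below is an illustrative re-implementation of just that signature-rendering rule in plain Python; it is not part of the tokenizer, and the helper name `render_signature` is made up for this sketch.

    ```python
    from typing import Dict


    def render_signature(name: str, parameter_definitions: Dict[str, dict]) -> str:
        # Mirror the template rule: required params keep their bare type,
        # non-required params are rendered as Optional[<type>] = None.
        parts = []
        for param_name, fields in parameter_definitions.items():
            if fields.get("required"):
                parts.append(f"{param_name}: {fields['type']}")
            else:
                parts.append(f"{param_name}: Optional[{fields['type']}] = None")
        return f"def {name}({', '.join(parts)}) -> List[Dict]:"


    # Matches the 'internet_search' stub shown in the rendered example further below.
    print(render_signature(
        "internet_search",
        {"query": {"description": "Query to search the internet with", "type": "str", "required": True}},
    ))
    # -> def internet_search(query: str) -> List[Dict]:
    ```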
+ + Conceptually, this works in the same way as `apply_chat_format`, but takes an additional `tools` parameter. + + Converts a Conversation object or a list of dictionaries with `"role"` and `"content"` keys and a list of available + tools for the model to use into a prompt string, or a list of token ids. + This method will use the tokenizer's `default_tool_use_template` template specified at the class level. + You can override the default template using the `tool_use_template` kwarg but the quality of your results may decrease. + + Args: + conversation (Union[List[Dict[str, str]], "Conversation"]): A Conversation object or list of dicts + with "role" and "content" keys, representing the chat history so far. + tools (List[Dict]): a list of tools to render into the prompt for the model to choose from. + See an example at the bottom of the docstring. + The format should be: + * name (str): The name of the tool to be called. Valid names contain only the characters a-z, + A-Z, 0-9, _ and must not begin with a digit. + * description (str): The description of what the tool does, the model uses the description to + choose when and how to call the function. + * parameter_definitions (List[Dict]): The input parameters of the tool. Accepts a dictionary + where the key is the name of the parameter and the value is the parameter spec. + Valid parameter names contain only the characters a-z, A-Z, 0-9, _ and must not begin with a digit. + Parameter specs are as follows: + * description (str): The description of the parameter. + * type (str): the type of the parameter - most effective for python builtin data types, such as 'str', 'bool' + * required: boolean: Denotes whether the parameter is always present (required) or not. Defaults to not required. + add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate + the start of an assistant message. This is useful when you want to generate a response from the model. + Note that this argument will be passed to the chat template, and so it must be supported in the + template for this argument to have any effect. + tokenize (`bool`, defaults to `True`): + Whether to tokenize the output. If `False`, the output will be a string. + padding (`bool`, defaults to `False`): + Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. + truncation (`bool`, defaults to `False`): + Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. + max_length (`int`, *optional*): + Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. If + not specified, the tokenizer's `max_length` attribute will be used as a default. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable + values are: + - `'tf'`: Return TensorFlow `tf.Tensor` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + return_dict (`bool`, *optional*, defaults to `False`): + Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. + **tokenizer_kwargs: Additional kwargs to pass to the tokenizer. + + Returns: + `str`: A rendered prompt string. + or if tokenize=True: + `List[int]`: A list of token ids representing the tokenized chat so far, including control tokens. 
This + output is ready to pass to the model, either directly or via methods like `generate()`. + + Examples: + + ```python + >> tokenizer = CohereTokenizerFast.from_pretrained("CohereForAI/c4ai-command-r-v01") + >> tools = [ + { + "name": "internet_search", + "description": "Returns a list of relevant document snippets for a textual query retrieved from the internet", + "parameter_definitions": { + "query": { + "description": "Query to search the internet with", + "type": "str", + "required": True + } + } + }, + { + "name': "directly_answer", + "description": "Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history", + "parameter_definitions": {} + } + ] + >> conversation = [ + {"role": "user", "content": "Whats the biggest penguin in the world?"} + ] + >> # render the prompt, ready for user to inspect, or for input into the model: + >> prompt = tokenizer.apply_tool_use_template(conversation, tools=tools, tokenize=False, add_generation_prompt=True) + >> print(prompt) + <|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble + The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. + + # System Preamble + ## Basic Rules + You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. + + # User Preamble + ## Task and Context + You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. + + ## Style Guide + Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling. + + ## Available Tools + Here is a list of tools that you have available to you: + + \\`\\`\\`python + def internet_search(query: str) -> List[Dict]: + \"\"\"Returns a list of relevant document snippets for a textual query retrieved from the internet + + Args: + query (str): Query to search the internet with + \"\"\" + pass + \\`\\`\\` + + \\`\\`\\`python + def directly_answer() -> List[Dict]: + \"\"\"Calls a standard (un-augmented) AI chatbot to generate a response given the conversation history + \"\"\" + pass + \\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Write 'Action:' followed by a json-formatted list of actions that you want to perform in order to produce a good response to the user's last input. You can use any of the supplied tools any number of times, but you should aim to execute the minimum number of necessary actions for the input. You should use the `directly-answer` tool if calling the other tools is unnecessary. 
The list of actions you want to call should be formatted as a list of json objects, for example: + \\`\\`\\`json + [ + { + "tool_name": title of the tool in the specification, + "parameters": a dict of parameters to input into the tool as they are defined in the specs, or {} if it takes no parameters + } + ]\\`\\`\\`<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|> + ``` + >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt') + >> outputs = model.generate(inputs, max_new_tokens=128) + >> print(tokenizer.decode(outputs[0])) + Action: ```json + [ + { + "tool_name": "internet_search", + "parameters": { + "query": "biggest penguin in the world" + } + } + ] + ``` + """ + return self.apply_chat_template( + conversation, + chat_template="tool_use", + tools=tools, + **kwargs, + ) + + def apply_grounded_generation_template( + self, + conversation: Union[List[Dict[str, str]], "Conversation"], + documents: List[Dict], + citation_mode: Literal["fast", "accurate"] = "accurate", + **kwargs, + ) -> Union[str, List[int]]: + """Create a Command-R grounded generation (aka RAG) prompt. + + Once rendered, the prompt instructs the model to generate a response with citations in, based on supplied documents. + + Conceptually, this works in the same way as `apply_chat_format`, but takes additional `documents` + and parameter `citation_mode` parameters. + + Converts a Conversation object or a list of dictionaries with `"role"` and `"content"` keys and a list of + documents for the model to ground its response on into a prompt string, or a list of token ids. + This method will use the tokenizer's `grounded_generation_template` template specified at the class level. + You can override the default template using the `grounded_generation_template` kwarg but the quality of your results may decrease. + + Args: + conversation (Union[List[Dict[str, str]], "Conversation"]): A Conversation object or list of dicts + with "role" and "content" keys, representing the chat history so far. + documents (List[Dict[str, str]): A list of dicts, representing documents or tool outputs to ground your + generation on. A document is a semistructured dict, wiht a string to string mapping. Common fields are + `url`, `title`, `snippet` etc but should be descriptive of the key. They will get rendered into the prompt. + citation_mode: either "accurate" (prompt the model to generate an answer first, then rewrite it with citation + spans in) or "fast", where the prompt instructs the model to generate an answer with citations in directly. + The former has higher quality citations, the latter requires fewer tokens to be generated. + add_generation_prompt (bool, *optional*): Whether to end the prompt with the token(s) that indicate + the start of an assistant message. This is useful when you want to generate a response from the model. + Note that this argument will be passed to the chat template, and so it must be supported in the + template for this argument to have any effect. + tokenize (`bool`, defaults to `True`): + Whether to tokenize the output. If `False`, the output will be a string. + padding (`bool`, defaults to `False`): + Whether to pad sequences to the maximum length. Has no effect if tokenize is `False`. + truncation (`bool`, defaults to `False`): + Whether to truncate sequences at the maximum length. Has no effect if tokenize is `False`. + max_length (`int`, *optional*): + Maximum length (in tokens) to use for padding or truncation. Has no effect if tokenize is `False`. 
If + not specified, the tokenizer's `max_length` attribute will be used as a default. + return_tensors (`str` or [`~utils.TensorType`], *optional*): + If set, will return tensors of a particular framework. Has no effect if tokenize is `False`. Acceptable + values are: + - `'tf'`: Return TensorFlow `tf.Tensor` objects. + - `'pt'`: Return PyTorch `torch.Tensor` objects. + - `'np'`: Return NumPy `np.ndarray` objects. + - `'jax'`: Return JAX `jnp.ndarray` objects. + return_dict (`bool`, *optional*, defaults to `False`): + Whether to return a dictionary with named outputs. Has no effect if tokenize is `False`. + **tokenizer_kwargs: Additional kwargs to pass to the tokenizer. + + Returns: + `str`: A rendered prompt string. + or if tokenize=True: + `List[int]`: A list of token ids representing the tokenized chat so far, including control tokens. This + output is ready to pass to the model, either directly or via methods like `generate()`. + + Examples: + + ```python + >> tokenizer = CohereTokenizerFast.from_pretrained('CohereForAI/c4ai-command-r-v01') + + >> # define documents: + >> documents = [ + { "title": "Tall penguins", "text": "Emperor penguins are the tallest." }, + { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica."} + ] + >> # define a conversation: + >> conversation = [ + {"role": "user", "content": "Whats the biggest penguin in the world?"} + ] + >> # render the prompt, ready for user to inspect, or for input into the model: + >> grounded_generation_prompt = tokenizer.apply_grounded_generation_template(conversation, documents=documents, tokenize=False, add_generation_prompt=True) + >> print(grounded_generation_prompt) + <|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|># Safety Preamble + The instructions in this section override those in the task description and style guide sections. Don't answer questions that are harmful or immoral. + + ## Basic Rules + You are a powerful conversational AI trained by Cohere to help people. You are augmented by a number of tools, and your job is to use and consume the output of these tools to best help the user. You will see a conversation history between yourself and a user, ending with an utterance from the user. You will then see a specific instruction instructing you what kind of response to generate. When you answer the user's requests, you cite your sources in your answers, according to those instructions. + + # User Preamble + ## Task and Context + You help people answer their questions and other requests interactively. You will be asked a very wide array of requests on all kinds of topics. You will be equipped with a wide range of search engines or similar tools to help you, which you use to research your answer. You should focus on serving the user's needs as best you can, which will be wide-ranging. + + ## Style Guide + Unless the user asks for a different style of answer, you should answer in full sentences, using proper grammar and spelling.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Whats the biggest penguin in the world?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|> + Document: 0 + title: Tall penguins + text: Emperor penguins are the tallest. + + Document: 1 + title: Penguin habitats + text: Emperor penguins only live in Antarctica. + <|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>Carefully perform the following instructions, in order, starting each with a new line. 
+ Firstly, Decide which of the retrieved documents are relevant to the user's last input by writing 'Relevant Documents:' followed by comma-separated list of document numbers. If none are relevant, you should instead write 'None'. + Secondly, Decide which of the retrieved documents contain facts that should be cited in a good answer to the user's last input by writing 'Cited Documents:' followed a comma-separated list of document numbers. If you dont want to cite any of them, you should instead write 'None'. + Thirdly, Write 'Answer:' followed by a response to the user's last input in high quality natural english. Use the retrieved documents to help you. Do not insert any citations or grounding markup. + Finally, Write 'Grounded answer:' followed by a response to the user's last input in high quality natural english. Use the symbols and to indicate when a fact comes from a document in the search result, e.g my fact for a fact from document 0.<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>''' + ``` + >> inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors='pt') + >> outputs = model.generate(inputs, max_new_tokens=128) + >> print(tokenizer.decode(outputs[0])) + Relevant Documents: 0,1 + Cited Documents: 0,1 + Answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres. + Grounded answer: The Emperor Penguin is the tallest or biggest penguin in the world. It is a bird that lives only in Antarctica and grows to a height of around 122 centimetres. + """ + return self.apply_chat_template( + conversation, + chat_template="rag", + documents=documents, + citation_mode=citation_mode, + **kwargs, + ) + + # TODO ArthurZ let's rely on the template processor instead, refactor all fast tokenizers + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + bos_token_id = [self.bos_token_id] if self.add_bos_token else [] + eos_token_id = [self.eos_token_id] if self.add_eos_token else [] + + output = bos_token_id + token_ids_0 + eos_token_id + + if token_ids_1 is not None: + output = output + bos_token_id + token_ids_1 + eos_token_id + + return output diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1980f94955e257a591c0a20efe5aea85c322296 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2051c5b69f8744aa836bbf50545fd44d224c592e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/configuration_fsmt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2e42879071d9332dc3a9b0d655ca39e7954574b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/convert_fsmt_original_pytorch_checkpoint_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65664a541dcca7d13d744c8434d162c0dc33d5dc Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/fsmt/__pycache__/tokenization_fsmt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02ca0a11949b73ecef0329412d869ce1996d1bc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/__init__.py @@ -0,0 +1,85 @@ +# Copyright 2021 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import TYPE_CHECKING + +from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available + + +_import_structure = { + "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"], +} + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_gpt_neo"] = [ + "GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST", + "GPTNeoForCausalLM", + "GPTNeoForQuestionAnswering", + "GPTNeoForSequenceClassification", + "GPTNeoForTokenClassification", + "GPTNeoModel", + "GPTNeoPreTrainedModel", + "load_tf_weights_in_gpt_neo", + ] + +try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_flax_gpt_neo"] = [ + "FlaxGPTNeoForCausalLM", + "FlaxGPTNeoModel", + "FlaxGPTNeoPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_gpt_neo import ( + GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST, + GPTNeoForCausalLM, + GPTNeoForQuestionAnswering, + GPTNeoForSequenceClassification, + GPTNeoForTokenClassification, + GPTNeoModel, + GPTNeoPreTrainedModel, + load_tf_weights_in_gpt_neo, + ) + + try: + if not is_flax_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, 
globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/convert_gpt_neo_mesh_tf_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/convert_gpt_neo_mesh_tf_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..30116396f45e874312776544c5c6303a6d4c5bb4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/__pycache__/convert_gpt_neo_mesh_tf_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/configuration_gpt_neo.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/configuration_gpt_neo.py new file mode 100644 index 0000000000000000000000000000000000000000..411b392180b018ca027d08138f363a7335e9d384 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/configuration_gpt_neo.py @@ -0,0 +1,272 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" GPT Neo model configuration""" + +from collections import OrderedDict +from typing import Any, Mapping, Optional + +from ... import PreTrainedTokenizer, TensorType, is_torch_available +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfigWithPast +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class GPTNeoConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`GPTNeoModel`]. It is used to instantiate a GPT + Neo model according to the specified arguments, defining the model architecture. Instantiating a configuration with + the defaults will yield a similar configuration to that of the GPTNeo + [EleutherAI/gpt-neo-1.3B](https://huggingface.co/EleutherAI/gpt-neo-1.3B) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 50257): + Vocabulary size of the GPT Neo model. Defines the number of different tokens that can be represented by the + `inputs_ids` passed when calling [`GPTNeoModel`]. Vocabulary size of the model. Defines the different + tokens that can be represented by the *inputs_ids* passed to the forward method of [`GPTNeoModel`]. + max_position_embeddings (`int`, *optional*, defaults to 2048): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). + hidden_size (`int`, *optional*, defaults to 2048): + Dimensionality of the encoder layers and the pooler layer. 
+ num_layers (`int`, *optional*, defaults to 24): + Number of hidden layers in the Transformer encoder. + attention_types (`List`, *optional*, defaults to `[[['global', 'local'], 12]]`): + The type of attention for each layer in a `List` of the following format `[[["attention_type"], + num_layerss]]` e.g. for a 24 layer model `[[["global"], 24]]` or `[[["global", "local"], 12]]` Choose the + value of `attention_type` from `["global", "local"]` + num_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 8192): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + window_size (`int`, *optional*, defaults to 256): + The size of the sliding window for local attention. + activation_function (`str` or `function`, *optional*, defaults to `"gelu_new"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + resid_dropout (`float`, *optional*, defaults to 0.0): + Residual dropout used in the attention pattern. + embed_dropout (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_dropout (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + classifier_dropout (`float`, *optional*, defaults to 0.1): + Argument used when doing token classification, used in the model [`GPTNeoForTokenClassification`]. The + dropout ratio for the hidden layer. + layer_norm_epsilon (`float`, *optional*, defaults to 1e-05): + The epsilon used by the layer normalization layers. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + use_cache (`bool`, *optional*, defaults to `True`): + Whether or not the model should return the last key/values attentions (not used by all models). Only + relevant if `config.is_decoder=True`. + bos_token_id (`int`, *optional*, defaults to 50256): + The id of the beginning of sentence token in the vocabulary. + eos_token_id (`int`, *optional*, defaults to 50256): + The id of the end of sentence token in the vocabulary. 
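 To make the `attention_types` format documented above concrete: the nested pattern is expanded into one attention type per layer, mirroring the `expand_attention_types_params` helper defined below on this class. A minimal sketch of that expansion:

 ```python
 def expand(attention_types):
     # Repeat each sub-pattern `num_layers`-many times, one entry per layer.
     layers = []
     for pattern, repeats in attention_types:
         for _ in range(repeats):
             layers.extend(pattern)
     return layers


 print(expand([[["global", "local"], 12]]))  # 24 entries, alternating 'global'/'local'
 print(expand([[["global"], 24]]))           # 24 entries, all 'global'
 ```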
+ + Example: + + ```python + >>> from transformers import GPTNeoConfig, GPTNeoModel + + >>> # Initializing a GPTNeo EleutherAI/gpt-neo-1.3B style configuration + >>> configuration = GPTNeoConfig() + + >>> # Initializing a model (with random weights) from the EleutherAI/gpt-neo-1.3B style configuration + >>> model = GPTNeoModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "gpt_neo" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"} + + def __init__( + self, + vocab_size=50257, + max_position_embeddings=2048, + hidden_size=2048, + num_layers=24, + attention_types=[[["global", "local"], 12]], + num_heads=16, + intermediate_size=None, + window_size=256, + activation_function="gelu_new", + resid_dropout=0.0, + embed_dropout=0.0, + attention_dropout=0.0, + classifier_dropout=0.1, + layer_norm_epsilon=1e-5, + initializer_range=0.02, + use_cache=True, + bos_token_id=50256, + eos_token_id=50256, + **kwargs, + ): + self.vocab_size = vocab_size + self.max_position_embeddings = max_position_embeddings + self.hidden_size = hidden_size + self.num_layers = num_layers + self.num_heads = num_heads + self.intermediate_size = intermediate_size + self.window_size = window_size + self.activation_function = activation_function + self.resid_dropout = resid_dropout + self.embed_dropout = embed_dropout + self.attention_dropout = attention_dropout + self.classifier_dropout = classifier_dropout + self.layer_norm_epsilon = layer_norm_epsilon + self.initializer_range = initializer_range + self.use_cache = use_cache + + self.bos_token_id = bos_token_id + self.eos_token_id = eos_token_id + + self.attention_types = attention_types + self.attention_layers = self.expand_attention_types_params(attention_types) + + if len(self.attention_layers) != self.num_layers: + raise ValueError( + "Configuration for convolutional module is incorrect. " + "It is required that `len(config.attention_layers)` == `config.num_layers` " + f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, " + f"`config.num_layers = {self.num_layers}`. " + "`config.attention_layers` is prepared using `config.attention_types`. " + "Please verify the value of `config.attention_types` argument." + ) + + super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + + @staticmethod + def expand_attention_types_params(attention_types): + attentions = [] + for item in attention_types: + for _ in range(item[1]): + attentions.extend(item[0]) + return attentions + + +def custom_unfold(input, dimension, size, step): + """Custom torch.Tensor.unfold implementation to enable the export to ONNX.""" + import torch + + shape = input.size() + rank = len(shape) + sizedim = shape[dimension] + + low_indices = torch.arange(0, sizedim, step) + min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1 + indices = torch.arange(size) + low_indices[:min_length][:, None] + + s = [slice(None)] * rank + s[dimension] = indices + sliced = input[s] + + perm = list(range(0, rank + 1)) + perm.append(perm.pop(dimension + 1)) + + return sliced.permute(perm) + + +def custom_get_block_length_and_num_blocks(seq_length, window_size): + """ + Custom implementation for GPTNeoAttentionMixin._get_block_length_and_num_blocks to enable the export to ONNX as + original implementation uses Python variables and control flow. 
+ """ + import torch + + candidates = torch.arange(1, window_size) + remainders = torch.remainder(seq_length, candidates) + divisor_indices = remainders == 0 + divisors = candidates[divisor_indices] + largest_divisor = torch.max(divisors) + return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor") + + +class GPTNeoOnnxConfig(OnnxConfigWithPast): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}}) + if self.use_past: + self.fill_with_past_key_values_(common_inputs, direction="inputs") + common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"} + else: + common_inputs["attention_mask"] = {0: "batch", 1: "sequence"} + + return common_inputs + + @property + def num_attention_heads(self) -> int: + return self._config.num_heads + + def generate_dummy_inputs( + self, + tokenizer: PreTrainedTokenizer, + batch_size: int = -1, + seq_length: int = -1, + is_pair: bool = False, + framework: Optional[TensorType] = None, + ) -> Mapping[str, Any]: + common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs( + tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework + ) + + # We need to order the input in the way they appears in the forward() + ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]}) + + # Need to add the past_keys + if self.use_past: + if not is_torch_available(): + raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") + else: + import torch + + batch, seqlen = common_inputs["input_ids"].shape + # Not using the same length for past_key_values + past_key_values_length = seqlen + 2 + past_shape = ( + batch, + self.num_attention_heads, + past_key_values_length, + self._config.hidden_size // self.num_attention_heads, + ) + ordered_inputs["past_key_values"] = [ + (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers) + ] + + ordered_inputs["attention_mask"] = common_inputs["attention_mask"] + if self.use_past: + mask_dtype = ordered_inputs["attention_mask"].dtype + ordered_inputs["attention_mask"] = torch.cat( + [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 + ) + + return ordered_inputs + + @property + def default_onnx_opset(self) -> int: + return 13 diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py new file mode 100644 index 0000000000000000000000000000000000000000..4a5fddd0a9d0f95b83777ebc9207a40811940535 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/convert_gpt_neo_mesh_tf_to_pytorch.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# Copyright 2021 The Eleuther AI and HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Convert GPT Neo checkpoint.""" + + +import argparse +import json + +from transformers import GPTNeoConfig, GPTNeoForCausalLM, load_tf_weights_in_gpt_neo +from transformers.utils import logging + + +logging.set_verbosity_info() + + +def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path): + # Initialise PyTorch model + config_json = json.load(open(config_file, "r")) + config = GPTNeoConfig( + hidden_size=config_json["n_embd"], + num_layers=config_json["n_layer"], + num_heads=config_json["n_head"], + attention_types=config_json["attention_types"], + max_position_embeddings=config_json["n_positions"], + resid_dropout=config_json["res_dropout"], + embed_dropout=config_json["embed_dropout"], + attention_dropout=config_json["attn_dropout"], + ) + print(f"Building PyTorch model from configuration: {config}") + model = GPTNeoForCausalLM(config) + + # Load weights from tf checkpoint + load_tf_weights_in_gpt_neo(model, config, tf_checkpoint_path) + + # Save pytorch-model + print(f"Save PyTorch model to {pytorch_dump_path}") + model.save_pretrained(pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." + ) + parser.add_argument( + "--config_file", + default=None, + type=str, + required=True, + help=( + "The config json file corresponding to the pre-trained mesh-tf model. \n" + "This specifies the model architecture." + ), + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_flax_gpt_neo.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_flax_gpt_neo.py new file mode 100644 index 0000000000000000000000000000000000000000..5639ca50f166a272968b497df696d15410f180ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_flax_gpt_neo.py @@ -0,0 +1,684 @@ +# coding=utf-8 +# Copyright 2021 The Eleuther AI and The Google Flax Team Authors and The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from functools import partial +from typing import Optional, Tuple + +import flax.linen as nn +import jax +import jax.numpy as jnp +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen.attention import dot_product_attention_weights +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import lax + +from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxCausalLMOutput +from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring +from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_gpt_neo import GPTNeoConfig + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "GPTNeoConfig" +_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B" + + +GPT_NEO_START_DOCSTRING = r""" + + This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a Flax Linen + [flax.nn.Module](https://flax.readthedocs.io/en/latest/_autosummary/flax.nn.module.html) subclass. Use it as a + regular Flax Module and refer to the Flax documentation for all matter related to general usage and behavior. + + Finally, this model supports inherent JAX features such as: + + - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) + - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) + - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) + - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) + + Parameters: + config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. + dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`): + The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and + `jax.numpy.bfloat16` (on TPUs). + + This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If + specified all the computation will be performed with the given `dtype`. + + **Note that this only specifies the dtype of the computation and does not influence the dtype of model + parameters.** + + If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and + [`~FlaxPreTrainedModel.to_bf16`]. +""" + +GPT_NEO_INPUTS_DOCSTRING = r""" + Args: + input_ids (`numpy.ndarray` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length`. Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. 
+ + [What are attention masks?](../glossary#attention-mask) + position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + past_key_values (`Dict[str, np.ndarray]`, *optional*, returned by `init_cache` or when passing previous `past_key_values`): + Dictionary of pre-computed hidden-states (key and values in the attention blocks) that can be used for fast + auto-regressive decoding. Pre-computed key and value hidden-states are of shape *[batch_size, max_length]*. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +class FlaxGPTNeoSelfAttention(nn.Module): + config: GPTNeoConfig + attention_type: str + dtype: jnp.dtype = jnp.float32 + + def setup(self): + config = self.config + self.embed_dim = config.hidden_size + self.num_heads = config.num_attention_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and " + f"`num_heads`: {self.num_heads})." + ) + + self.attn_dropout = nn.Dropout(config.attention_dropout) + self.resid_dropout = nn.Dropout(config.resid_dropout) + + dense = partial( + nn.Dense, + self.embed_dim, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.initializer_range), + ) + + self.q_proj, self.k_proj, self.v_proj = dense(use_bias=False), dense(use_bias=False), dense(use_bias=False) + self.out_proj = dense() + + self.causal_mask = make_causal_mask(jnp.ones((1, config.max_position_embeddings), dtype="bool"), dtype="bool") + if self.attention_type == "local": + self.causal_mask = self.causal_mask ^ jnp.tril(self.causal_mask, -config.window_size) + + def _split_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim)) + + def _merge_heads(self, hidden_states): + return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,)) + + @nn.compact + def _concatenate_to_cache(self, key, value, query, attention_mask): + """ + This function takes projected key, value states from a single input token and concatenates the states to cached + states from previous steps. This function is slighly adapted from the official Flax repository: + https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 + """ + # detect if we're initializing by absence of existing cache data. 
+ is_initialized = self.has_variable("cache", "cached_key") + cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) + cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) + cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) + + if is_initialized: + *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape + # update key, value caches with our new 1d spatial slices + cur_index = cache_index.value + indices = (0,) * len(batch_dims) + (cur_index, 0, 0) + key = lax.dynamic_update_slice(cached_key.value, key, indices) + value = lax.dynamic_update_slice(cached_value.value, value, indices) + cached_key.value = key + cached_value.value = value + num_updated_cache_vectors = query.shape[1] + cache_index.value = cache_index.value + num_updated_cache_vectors + # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. + pad_mask = jnp.broadcast_to( + jnp.arange(max_length) < cur_index + num_updated_cache_vectors, + tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), + ) + attention_mask = combine_masks(pad_mask, attention_mask) + return key, value, attention_mask + + def __call__( + self, + hidden_states, + attention_mask=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + query = self.q_proj(hidden_states) * jnp.sqrt(self.head_dim).astype(self.dtype) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query) + key = self._split_heads(key) + value = self._split_heads(value) + + query_length, key_length = query.shape[1], key.shape[1] + + if self.has_variable("cache", "cached_key"): + mask_shift = self.variables["cache"]["cache_index"] + max_decoder_length = self.variables["cache"]["cached_key"].shape[1] + causal_mask = lax.dynamic_slice( + self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) + ) + else: + causal_mask = self.causal_mask[:, :, :query_length, :key_length] + + batch_size = hidden_states.shape[0] + causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) + + attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) + attention_mask = combine_masks(attention_mask, causal_mask) + + dropout_rng = None + if not deterministic and self.config.attention_dropout > 0.0: + dropout_rng = self.make_rng("dropout") + + # During fast autoregressive decoding, we feed one position at a time, + # and cache the keys and values step by step. 
+ if self.has_variable("cache", "cached_key") or init_cache: + key, value, attention_mask = self._concatenate_to_cache(key, value, query, attention_mask) + + # transform boolean mask into float mask + attention_bias = lax.select( + attention_mask > 0, + jnp.full(attention_mask.shape, 0.0).astype(self.dtype), + jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), + ) + + # usual dot product attention + attn_weights = dot_product_attention_weights( + query, + key, + bias=attention_bias, + dropout_rng=dropout_rng, + dropout_rate=self.config.attention_dropout, + deterministic=deterministic, + dtype=self.dtype, + precision=None, + ) + + attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value) + attn_output = self._merge_heads(attn_output) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output, deterministic=deterministic) + + outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) + return outputs + + +class FlaxGPTNeoAttention(nn.Module): + config: GPTNeoConfig + layer_id: int = 0 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + attention_type = self.config.attention_layers[self.layer_id] + self.attention = FlaxGPTNeoSelfAttention(self.config, attention_type, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + return self.attention( + hidden_states, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + + +class FlaxGPTNeoMLP(nn.Module): + config: GPTNeoConfig + intermediate_size: int + dtype: jnp.dtype = jnp.float32 + + def setup(self): + embed_dim = self.config.hidden_size + kernel_init = jax.nn.initializers.normal(self.config.initializer_range) + self.c_fc = nn.Dense(self.intermediate_size, dtype=self.dtype, kernel_init=kernel_init) + self.c_proj = nn.Dense(embed_dim, dtype=self.dtype, kernel_init=kernel_init) + self.act = ACT2FN[self.config.activation_function] + self.dropout = nn.Dropout(rate=self.config.resid_dropout) + + def __call__(self, hidden_states, deterministic: bool = True): + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + return hidden_states + + +class FlaxGPTNeoBlock(nn.Module): + config: GPTNeoConfig + layer_id: int = 0 + dtype: jnp.dtype = jnp.float32 + + def setup(self): + hidden_size = self.config.hidden_size + inner_dim = self.config.intermediate_size if self.config.intermediate_size is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + self.attn = FlaxGPTNeoAttention(self.config, layer_id=self.layer_id, dtype=self.dtype) + self.ln_2 = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + self.mlp = FlaxGPTNeoMLP(self.config, inner_dim, dtype=self.dtype) + + def __call__( + self, + hidden_states, + attention_mask=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + outputs = self.attn( + hidden_states, + attention_mask=attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + # residual connection + attn_output = outputs[0] + hidden_states = 
attn_output + residual + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states, deterministic=deterministic) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + return (hidden_states,) + outputs[1:] + + +class FlaxGPTNeoPreTrainedModel(FlaxPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPTNeoConfig + base_model_prefix = "transformer" + module_class: nn.Module = None + + def __init__( + self, + config: GPTNeoConfig, + input_shape: Tuple = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + _do_init: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) + + def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: + # init input tensors + input_ids = jnp.zeros(input_shape, dtype="i4") + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape) + params_rng, dropout_rng = jax.random.split(rng) + rngs = {"params": params_rng, "dropout": dropout_rng} + + random_params = self.module.init(rngs, input_ids, attention_mask, position_ids, return_dict=False)["params"] + + if params is not None: + random_params = flatten_dict(unfreeze(random_params)) + params = flatten_dict(unfreeze(params)) + for missing_key in self._missing_keys: + params[missing_key] = random_params[missing_key] + self._missing_keys = set() + return freeze(unflatten_dict(params)) + else: + return random_params + + def init_cache(self, batch_size, max_length): + r""" + Args: + batch_size (`int`): + batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. + max_length (`int`): + maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized + cache. 
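        Illustrative sketch (not from the upstream docstring): how the cache returned here is threaded back into `__call__`. The tiny random config keeps the example self-contained; note that the attention mask must already span `max_length` when a cache is used, which is what `prepare_inputs_for_generation` arranges later in this file.

        ```python
        import jax.numpy as jnp
        from transformers import FlaxGPTNeoForCausalLM, GPTNeoConfig

        # Small, randomly initialised model so the sketch runs without downloading weights.
        config = GPTNeoConfig(
            hidden_size=64, num_layers=4, num_heads=4,
            attention_types=[[["global", "local"], 2]], max_position_embeddings=128,
        )
        model = FlaxGPTNeoForCausalLM(config)

        batch_size, max_length, prompt_len = 1, 16, 4
        past_key_values = model.init_cache(batch_size, max_length)

        attention_mask = jnp.ones((batch_size, max_length), dtype="i4")
        prompt_ids = jnp.ones((batch_size, prompt_len), dtype="i4")
        position_ids = jnp.arange(prompt_len, dtype="i4")[None, :]

        outputs = model(
            prompt_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
        )
        past_key_values = outputs.past_key_values  # reuse for subsequent one-token steps
        ```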
+ """ + # init input variables to retrieve cache + input_ids = jnp.ones((batch_size, max_length)) + attention_mask = jnp.ones_like(input_ids) + position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) + + init_variables = self.module.init( + jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True + ) + return unfreeze(init_variables["cache"]) + + @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) + def __call__( + self, + input_ids, + attention_mask=None, + position_ids=None, + params: dict = None, + past_key_values: dict = None, + dropout_rng: jax.random.PRNGKey = None, + train: bool = False, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ): + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.return_dict + + batch_size, sequence_length = input_ids.shape + + if position_ids is None: + if past_key_values is not None: + raise ValueError("Make sure to provide `position_ids` when passing `past_key_values`.") + + position_ids = jnp.broadcast_to(jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)) + + if attention_mask is None: + attention_mask = jnp.ones((batch_size, sequence_length)) + + # Handle any PRNG if needed + rngs = {} + if dropout_rng is not None: + rngs["dropout"] = dropout_rng + + inputs = {"params": params or self.params} + + # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be changed by FlaxGPTNeoAttention module + if past_key_values: + inputs["cache"] = past_key_values + mutable = ["cache"] + else: + mutable = False + + outputs = self.module.apply( + inputs, + jnp.array(input_ids, dtype="i4"), + jnp.array(attention_mask, dtype="i4"), + jnp.array(position_ids, dtype="i4"), + not train, + False, + output_attentions, + output_hidden_states, + return_dict, + rngs=rngs, + mutable=mutable, + ) + + # add updated cache to model output + if past_key_values is not None and return_dict: + outputs, past_key_values = outputs + outputs["past_key_values"] = unfreeze(past_key_values["cache"]) + return outputs + elif past_key_values is not None and not return_dict: + outputs, past_key_values = outputs + outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] + + return outputs + + +class FlaxGPTNeoBlockCollection(nn.Module): + config: GPTNeoConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.blocks = [ + FlaxGPTNeoBlock(self.config, layer_id=i, name=str(i), dtype=self.dtype) + for i in range(self.config.num_hidden_layers) + ] + + def __call__( + self, + hidden_states, + attention_mask=None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + + for block in self.blocks: + if output_hidden_states: + all_hidden_states += (hidden_states,) + + layer_outputs = block( + hidden_states, + attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions += (layer_outputs[1],) + + # this contains possible `None` values - `FlaxGPTNeoModule` will filter them out + outputs = (hidden_states, all_hidden_states, all_attentions) + + return outputs + + +class FlaxGPTNeoModule(nn.Module): + config: GPTNeoConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.embed_dim = self.config.hidden_size + embedding_init = jax.nn.initializers.normal(stddev=self.config.initializer_range) + self.wte = nn.Embed( + self.config.vocab_size, + self.embed_dim, + embedding_init=embedding_init, + ) + self.wpe = nn.Embed( + self.config.max_position_embeddings, + self.embed_dim, + embedding_init=embedding_init, + ) + self.dropout = nn.Dropout(rate=self.config.embed_dropout) + self.h = FlaxGPTNeoBlockCollection(self.config, dtype=self.dtype) + self.ln_f = nn.LayerNorm(epsilon=self.config.layer_norm_epsilon, dtype=self.dtype) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic=True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + input_embeds = self.wte(input_ids.astype("i4")) + position_embeds = self.wpe(position_ids.astype("i4")) + + hidden_states = input_embeds + position_embeds + hidden_states = self.dropout(hidden_states, deterministic=deterministic) + + outputs = self.h( + hidden_states, + attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + hidden_states = self.ln_f(hidden_states) + + hidden_states = outputs[0] + hidden_states = self.ln_f(hidden_states) + + if 
output_hidden_states: + all_hidden_states = outputs[1] + (hidden_states,) + outputs = (hidden_states, all_hidden_states) + outputs[2:] + else: + outputs = (hidden_states,) + outputs[1:] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=outputs[1], + attentions=outputs[-1], + ) + + +@add_start_docstrings( + "The bare GPTNeo Model transformer outputting raw hidden-states without any specific head on top.", + GPT_NEO_START_DOCSTRING, +) +class FlaxGPTNeoModel(FlaxGPTNeoPreTrainedModel): + module_class = FlaxGPTNeoModule + + +append_call_sample_docstring(FlaxGPTNeoModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutput, _CONFIG_FOR_DOC) + + +class FlaxGPTNeoForCausalLMModule(nn.Module): + config: GPTNeoConfig + dtype: jnp.dtype = jnp.float32 + + def setup(self): + self.transformer = FlaxGPTNeoModule(self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.config.vocab_size, + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), + ) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + outputs = self.transformer( + input_ids, + attention_mask, + position_ids, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_kernel = self.transformer.variables["params"]["wte"]["embedding"].T + lm_logits = self.lm_head.apply({"params": {"kernel": shared_kernel}}, hidden_states) + else: + lm_logits = self.lm_head(hidden_states) + + if not return_dict: + return (lm_logits,) + outputs[1:] + + return FlaxCausalLMOutput(logits=lm_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions) + + +@add_start_docstrings( + """ + The GPTNeo Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). + """, + GPT_NEO_START_DOCSTRING, +) +class FlaxGPTNeoForCausalLM(FlaxGPTNeoPreTrainedModel): + module_class = FlaxGPTNeoForCausalLMModule + + def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jax.Array] = None): + # initializing the cache + batch_size, seq_length = input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since GPTNeo uses a causal mask, those positions are masked anyways. 
+ # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") + if attention_mask is not None: + position_ids = attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) + else: + position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) + + return { + "past_key_values": past_key_values, + "attention_mask": extended_attention_mask, + "position_ids": position_ids, + } + + def update_inputs_for_generation(self, model_outputs, model_kwargs): + model_kwargs["past_key_values"] = model_outputs.past_key_values + model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 + return model_kwargs + + +append_call_sample_docstring(FlaxGPTNeoForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutput, _CONFIG_FOR_DOC) diff --git a/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_gpt_neo.py b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_gpt_neo.py new file mode 100644 index 0000000000000000000000000000000000000000..2fbf4677ca6f442a1ba8d0a1cb515269f08ce2f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/gpt_neo/modeling_gpt_neo.py @@ -0,0 +1,1346 @@ +# coding=utf-8 +# Copyright 2021 The Eleuther AI and HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" PyTorch GPT Neo model.""" + + +import os +from typing import Optional, Tuple, Union + +import torch +import torch.nn.functional as F +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN +from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask +from ...modeling_outputs import ( + BaseModelOutputWithPast, + BaseModelOutputWithPastAndCrossAttentions, + CausalLMOutputWithCrossAttentions, + CausalLMOutputWithPast, + QuestionAnsweringModelOutput, + SequenceClassifierOutputWithPast, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel +from ...pytorch_utils import is_torch_greater_or_equal_than_1_13 +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + is_flash_attn_2_available, + is_flash_attn_greater_or_equal_2_10, + is_torch_fx_available, + logging, +) +from .configuration_gpt_neo import GPTNeoConfig + + +if is_flash_attn_2_available(): + from flash_attn import flash_attn_func, flash_attn_varlen_func + from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa + + +# This makes `_prepare_4d_causal_attention_mask` a leaf function in the FX graph. +# It means that the function will not be traced through and simply appear as a node in the graph. 
+if is_torch_fx_available(): + if not is_torch_greater_or_equal_than_1_13: + import torch.fx + + _prepare_4d_causal_attention_mask = torch.fx.wrap(_prepare_4d_causal_attention_mask) + + +logger = logging.get_logger(__name__) + +_CONFIG_FOR_DOC = "GPTNeoConfig" + + +from ..deprecated._archive_maps import GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-neo-1.3B" + + +# Copied from transformers.models.llama.modeling_llama._get_unpad_data +def _get_unpad_data(attention_mask): + seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32) + indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten() + max_seqlen_in_batch = seqlens_in_batch.max().item() + cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0)) + return ( + indices, + cu_seqlens, + max_seqlen_in_batch, + ) + + +def load_tf_weights_in_gpt_neo(model, config, gpt_neo_checkpoint_path): + """Load tf checkpoints in a pytorch model""" + try: + import re + + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_path = os.path.abspath(gpt_neo_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + names = [] + arrays = [] + for name, shape in init_vars: + if "global_step" not in name and "adam" not in name: + array = tf.train.load_variable(tf_path, name) + array = tf.dtypes.cast(array.squeeze(), tf.float32).numpy() + name = name.replace("attn/q", "attn/attention/q_proj/w") + name = name.replace("attn/k", "attn/attention/k_proj/w") + name = name.replace("attn/v", "attn/attention/v_proj/w") + name = name.replace("attn/o", "attn/attention/out_proj/w") + name = name.replace("norm_1", "ln_1") + name = name.replace("norm_2", "ln_2") + name = name.replace("attn/compute_output_bias/o_b", "attn/attention/out_proj/b") + name = name.replace("conv1d_main/c_fc/kernel", "c_fc/w") + name = name.replace("conv1d_main/c_fc/bias", "c_fc/b") + name = name.replace("conv1d_main/c_proj/kernel", "c_proj/w") + name = name.replace("conv1d_main/c_proj/bias", "c_proj/b") + + names.append(name) + arrays.append(array) + + for name, array in zip(names, arrays): + name = name[5:] # skip "gpt2/" + name = name.split("/") + pointer = model.transformer + for m_name in name: + if re.fullmatch(r"[A-Za-z]+\d+", m_name): + scope_names = re.split(r"(\d+)", m_name) + else: + scope_names = [m_name] + if scope_names[0] == "w" or scope_names[0] == "g": + pointer = getattr(pointer, "weight") + elif scope_names[0] == "b": + pointer = getattr(pointer, "bias") + elif scope_names[0] == "wpe" or scope_names[0] == "wte": + pointer = getattr(pointer, scope_names[0]) + pointer = getattr(pointer, "weight") + else: + pointer = getattr(pointer, scope_names[0]) + if len(scope_names) >= 2: + num = int(scope_names[1]) + pointer = pointer[num] + + if name[-1] == "w" and name[-2] in ["out_proj", "k_proj", "q_proj", "v_proj", "c_proj", "c_fc"]: + array = array.transpose() + + if name == ["wte"]: + # if vocab is padded, then trim off the padding embeddings + array = array[: config.vocab_size] + + if pointer.shape != array.shape: + raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched {name}") + + print(f"Initialize PyTorch weight {name}") + pointer.data = 
torch.from_numpy(array) + + # init the final linear layer using word embeddings + embs = model.transformer.wte.weight + lin = nn.Linear(embs.size()[1], embs.size()[0], bias=False) + lin.weight = embs + model.set_output_embeddings(lin) + return model + + +class GPTNeoSelfAttention(nn.Module): + def __init__(self, config, attention_type): + super().__init__() + self.config = config + + max_positions = config.max_position_embeddings + bias = torch.tril(torch.ones((max_positions, max_positions), dtype=bool)).view( + 1, 1, max_positions, max_positions + ) + + # local causal self attention is a sliding window where each token can only attend to the previous + # window_size tokens. This is implemented by updating the causal mask such that for each token + # all other tokens are masked except the previous window_size tokens. + if attention_type == "local": + bias = torch.bitwise_xor(bias, torch.tril(bias, -config.window_size)) + + self.register_buffer("bias", bias, persistent=False) + self.register_buffer("masked_bias", torch.tensor(-1e9), persistent=False) + + self.attn_dropout = nn.Dropout(float(config.attention_dropout)) + self.resid_dropout = nn.Dropout(float(config.resid_dropout)) + self.is_causal = True + + self.embed_dim = config.hidden_size + self.num_heads = config.num_heads + self.head_dim = self.embed_dim // self.num_heads + if self.head_dim * self.num_heads != self.embed_dim: + raise ValueError( + f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" + f" {self.num_heads})." + ) + + self.k_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.v_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.q_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=False) + self.out_proj = nn.Linear(self.embed_dim, self.embed_dim, bias=True) + + def _split_heads(self, tensor, num_heads, attn_head_size): + """ + Splits hidden_size dim into attn_head_size and num_heads + """ + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(new_shape) + return tensor.permute(0, 2, 1, 3) # (batch, head, seq_length, head_features) + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """ + Merges attn_head_size dim and num_attn_heads dim into hidden_size + """ + tensor = tensor.permute(0, 2, 1, 3).contiguous() + new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) + return tensor.view(new_shape) + + def _attn(self, query, key, value, attention_mask=None, head_mask=None): + # Keep the attention weights computation in fp32 to avoid overflow issues + query = query.to(torch.float32) + key = key.to(torch.float32) + + attn_weights = torch.matmul(query, key.transpose(-1, -2)) + + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length] + mask_value = torch.finfo(attn_weights.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(attn_weights.device) + attn_weights = torch.where(causal_mask, attn_weights, mask_value) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + attn_weights = attn_weights.to(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def forward( + self, + hidden_states, + attention_mask=None, + layer_past=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask) + + attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) + attn_output = self.out_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights,) + + return outputs # a, present, (attentions) + + +class GPTNeoFlashAttention2(GPTNeoSelfAttention): + """ + GPTNeo flash attention module. This module inherits from `GPTNeoSelfAttention` as the weights of the module stays + untouched. The only required change would be on the forward pass where it needs to correctly call the public API of + flash attention and deal with padding tokens in case the input contains any of them. + """ + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2.__init__ + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1. + # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignement, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0. + # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left). 
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() + + def forward( + self, + hidden_states, + attention_mask=None, + layer_past=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + bsz, _, _ = hidden_states.size() + + query = self.q_proj(hidden_states) + key = self.k_proj(hidden_states) + value = self.v_proj(hidden_states) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + if layer_past is not None: + past_key = layer_past[0] + past_value = layer_past[1] + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + query_length = query.shape[2] + tgt_len = key.shape[2] + + # Flash attention requires the input to have the shape + # batch_size x seq_length x head_dim x hidden_dim + query = query.transpose(1, 2).view(bsz, query_length, self.num_heads, self.head_dim) + key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) + + attn_dropout = self.config.attention_dropout if self.training else 0.0 + + # In PEFT, usually we cast the layer norms in float32 for training stability reasons + # therefore the input hidden states gets silently casted in float32. Hence, we need + # cast them back in the correct dtype just to be sure everything works as expected. + # This might slowdown training & inference so it is recommended to not cast the LayerNorms + # in fp32. (LlamaRMSNorm handles it correctly) + + if query.dtype == torch.float32: + if torch.is_autocast_enabled(): + target_dtype = torch.get_autocast_gpu_dtype() + # Handle the case where the model is quantized + elif hasattr(self.config, "_pre_quantization_dtype"): + target_dtype = self.config._pre_quantization_dtype + else: + target_dtype = self.q_proj.weight.dtype + + logger.warning_once( + f"The input hidden states seems to be silently casted in float32, this might be related to" + f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" + f" {target_dtype}." + ) + + query = query.to(target_dtype) + key = key.to(target_dtype) + value = value.to(target_dtype) + + attn_output = self._flash_attention_forward( + query, key, value, attention_mask, query_length, dropout=attn_dropout, softmax_scale=1.0 + ) + + attn_weights_reshaped = attn_output.reshape(bsz, query_length, self.num_heads * self.head_dim) + attn_output = self.out_proj(attn_weights_reshaped) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights_reshaped,) + + return outputs + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._flash_attention_forward + def _flash_attention_forward( + self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None + ): + """ + Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token + first unpad the input, then computes the attention scores and pad the final attention scores. 
+ + Args: + query_states (`torch.Tensor`): + Input query states to be passed to Flash Attention API + key_states (`torch.Tensor`): + Input key states to be passed to Flash Attention API + value_states (`torch.Tensor`): + Input value states to be passed to Flash Attention API + attention_mask (`torch.Tensor`): + The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the + position of padding tokens and 1 for the position of non-padding tokens. + dropout (`float`): + Attention dropout + softmax_scale (`float`, *optional*): + The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim) + """ + if not self._flash_attn_uses_top_left_mask: + causal = self.is_causal + else: + # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__. + causal = self.is_causal and query_length != 1 + + # Contains at least one padding token in the sequence + if attention_mask is not None: + batch_size = query_states.shape[0] + query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input( + query_states, key_states, value_states, attention_mask, query_length + ) + + cu_seqlens_q, cu_seqlens_k = cu_seq_lens + max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens + + attn_output_unpad = flash_attn_varlen_func( + query_states, + key_states, + value_states, + cu_seqlens_q=cu_seqlens_q, + cu_seqlens_k=cu_seqlens_k, + max_seqlen_q=max_seqlen_in_batch_q, + max_seqlen_k=max_seqlen_in_batch_k, + dropout_p=dropout, + softmax_scale=softmax_scale, + causal=causal, + ) + + attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length) + else: + attn_output = flash_attn_func( + query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal + ) + + return attn_output + + # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2._upad_input + def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length): + indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask) + batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape + + key_layer = index_first_axis( + key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + value_layer = index_first_axis( + value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k + ) + if query_length == kv_seq_len: + query_layer = index_first_axis( + query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k + ) + cu_seqlens_q = cu_seqlens_k + max_seqlen_in_batch_q = max_seqlen_in_batch_k + indices_q = indices_k + elif query_length == 1: + max_seqlen_in_batch_q = 1 + cu_seqlens_q = torch.arange( + batch_size + 1, dtype=torch.int32, device=query_layer.device + ) # There is a memcpy here, that is very bad. + indices_q = cu_seqlens_q[:-1] + query_layer = query_layer.squeeze(1) + else: + # The -q_len: slice assumes left padding. 
+ attention_mask = attention_mask[:, -query_length:] + query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask) + + return ( + query_layer, + key_layer, + value_layer, + indices_q, + (cu_seqlens_q, cu_seqlens_k), + (max_seqlen_in_batch_q, max_seqlen_in_batch_k), + ) + + +GPT_NEO_ATTENTION_CLASSES = { + "eager": GPTNeoSelfAttention, + "flash_attention_2": GPTNeoFlashAttention2, +} + + +class GPTNeoAttention(nn.Module): + def __init__(self, config, layer_id=0): + super().__init__() + self.layer_id = layer_id + self.attention_layers = config.attention_layers + self.attention_type = self.attention_layers[layer_id] + + if self.attention_type in ["global", "local"]: + self.attention = GPT_NEO_ATTENTION_CLASSES[config._attn_implementation](config, self.attention_type) + else: + raise NotImplementedError( + "Only attn layer types 'global' and 'local' exist, but got `config.attention_layers`: " + f"{config.attention_layers}. Select attn layer types from ['global', 'local'] only." + ) + + def forward( + self, + hidden_states, + layer_past=None, + attention_mask=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + return self.attention( + hidden_states, + attention_mask=attention_mask, + layer_past=layer_past, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + +class GPTNeoMLP(nn.Module): + def __init__(self, intermediate_size, config): # in MLP: intermediate_size= 4 * hidden_size + super().__init__() + embed_dim = config.hidden_size + self.c_fc = nn.Linear(embed_dim, intermediate_size) + self.c_proj = nn.Linear(intermediate_size, embed_dim) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(float(config.resid_dropout)) + + def forward(self, hidden_states): + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +class GPTNeoBlock(nn.Module): + def __init__(self, config, layer_id): + super().__init__() + hidden_size = config.hidden_size + inner_dim = config.intermediate_size if config.intermediate_size is not None else 4 * hidden_size + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = GPTNeoAttention(config, layer_id) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.mlp = GPTNeoMLP(inner_dim, config) + + def forward( + self, + hidden_states, + layer_past=None, + attention_mask=None, + head_mask=None, + use_cache=False, + output_attentions=False, + ): + residual = hidden_states + hidden_states = self.ln_1(hidden_states) + attn_outputs = self.attn( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + attn_output = attn_outputs[0] # output_attn: a, present, (attentions) + outputs = attn_outputs[1:] + # residual connection + hidden_states = attn_output + residual + + residual = hidden_states + hidden_states = self.ln_2(hidden_states) + feed_forward_hidden_states = self.mlp(hidden_states) + # residual connection + hidden_states = residual + feed_forward_hidden_states + + if use_cache: + outputs = (hidden_states,) + outputs + else: + outputs = (hidden_states,) + outputs[1:] + + return outputs # hidden_states, present, (attentions, cross_attentions) + + +class GPTNeoPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights 
initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = GPTNeoConfig + load_tf_weights = load_tf_weights_in_gpt_neo + base_model_prefix = "transformer" + supports_gradient_checkpointing = True + _no_split_modules = ["GPTNeoBlock"] + _skip_keys_device_placement = "past_key_values" + _supports_flash_attn_2 = True + + def __init__(self, *inputs, **kwargs): + super().__init__(*inputs, **kwargs) + + def _init_weights(self, module): + """Initialize the weights.""" + if isinstance(module, (nn.Linear,)): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +GPT_NEO_START_DOCSTRING = r""" + + This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. + Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage + and behavior. + + Parameters: + config ([`GPTNeoConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +GPT_NEO_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): + `input_ids_length` = `sequence_length` if `past_key_values` is `None` else + `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input + sequence tokens in the vocabulary. + + If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as + `input_ids`. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.num_layers`): + Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see + `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have + their past given to this model should not be passed as `input_ids` as they have already been computed. + attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): + Segment token indices to indicate first and second portions of the inputs. 
Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + + If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see + `past_key_values`). + use_cache (`bool`, *optional*): + If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see + `past_key_values`). + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
+""" + + +@add_start_docstrings( + "The bare GPT Neo Model transformer outputting raw hidden-states without any specific head on top.", + GPT_NEO_START_DOCSTRING, +) +class GPTNeoModel(GPTNeoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.embed_dim = config.hidden_size + self.wte = nn.Embedding(config.vocab_size, self.embed_dim) + self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) + self.drop = nn.Dropout(float(config.embed_dropout)) + self.h = nn.ModuleList([GPTNeoBlock(config, layer_id=i) for i in range(config.num_layers)]) + self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2" + self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) + + self.gradient_checkpointing = False + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithPastAndCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0][0].size(-2) + + if position_ids is None: + position_ids = torch.arange(past_length, input_shape[-1] + past_length, dtype=torch.long, device=device) + position_ids = position_ids.unsqueeze(0) + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x num_heads x N x N + # head_mask has shape n_layer x batch x num_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.num_layers) + + if inputs_embeds is None: + inputs_embeds = 
self.wte(input_ids) + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + + # Attention mask. + if self._use_flash_attention_2: + # 2d mask is passed through the layers + attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None + else: + # 4d mask is passed through the layers + attention_mask = _prepare_4d_causal_attention_mask(attention_mask, input_shape, inputs_embeds, past_length) + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." + ) + use_cache = False + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if self.gradient_checkpointing and self.training: + outputs = self._gradient_checkpointing_func( + block.__call__, + hidden_states, + None, + attention_mask, + head_mask[i], + use_cache, + output_attentions, + ) + else: + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + head_mask=head_mask[i], + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1],) + + if output_attentions: + all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None) + + return BaseModelOutputWithPast( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + ) + + +@add_start_docstrings( + """ + The GPT Neo Model transformer with a language modeling head on top (linear layer with weights tied to the input + embeddings). 
+ """, + GPT_NEO_START_DOCSTRING, +) +class GPTNeoForCausalLM(GPTNeoPreTrainedModel): + _tied_weights_keys = ["lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + self.transformer = GPTNeoModel(config) + self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.lm_head + + def set_output_embeddings(self, new_embeddings): + self.lm_head = new_embeddings + + def prepare_inputs_for_generation(self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs): + token_type_ids = kwargs.get("token_type_ids", None) + # Omit tokens covered by past_key_values + if past_key_values: + past_length = past_key_values[0][0].shape[2] + + # Some generation methods already pass only the last input ID + if input_ids.shape[1] > past_length: + remove_prefix_length = past_length + else: + # Default to old behavior: keep only final ID + remove_prefix_length = input_ids.shape[1] - 1 + + input_ids = input_ids[:, remove_prefix_length:] + if token_type_ids is not None: + token_type_ids = token_type_ids[:, -input_ids.shape[1] :] + + attention_mask = kwargs.get("attention_mask", None) + position_ids = kwargs.get("position_ids", None) + + if attention_mask is not None and position_ids is None: + # create position_ids on the fly for batch generation + position_ids = attention_mask.long().cumsum(-1) - 1 + position_ids.masked_fill_(attention_mask == 0, 1) + if past_key_values: + position_ids = position_ids[:, -input_ids.shape[1] :] + + # if `inputs_embeds` are passed, we only want to use them in the 1st generation step + if inputs_embeds is not None and past_key_values is None: + model_inputs = {"inputs_embeds": inputs_embeds} + else: + model_inputs = {"input_ids": input_ids} + + model_inputs.update( + { + "past_key_values": past_key_values, + "use_cache": kwargs.get("use_cache"), + "position_ids": position_ids, + "attention_mask": attention_mask, + "token_type_ids": token_type_ids, + } + ) + + return model_inputs + + @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=CausalLMOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. 
you can set + `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100` + are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + + lm_logits = self.lm_head(hidden_states) + + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(lm_logits.device) + # Compute loss in fp32 to match with mesh-tf version + # https://github.com/EleutherAI/gpt-neo/blob/89ce74164da2fb16179106f54e2269b5da8db333/models/gpt2/gpt2.py#L179 + lm_logits = lm_logits.to(torch.float32) + + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].contiguous() + shift_labels = labels[..., 1:].contiguous() + # Flatten the tokens + loss_fct = CrossEntropyLoss() + loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)) + + lm_logits = lm_logits.to(hidden_states.dtype) + loss = loss.to(hidden_states.dtype) + + if not return_dict: + output = (lm_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return CausalLMOutputWithPast( + loss=loss, + logits=lm_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + @staticmethod + def _reorder_cache( + past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor + ) -> Tuple[Tuple[torch.Tensor]]: + """ + This function is used to re-order the `past_key_values` cache if [`~PretrainedModel.beam_search`] or + [`~PretrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct + beam_idx at every generation step. + """ + return tuple( + tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past) + for layer_past in past_key_values + ) + + +@add_start_docstrings( + """ + The GPTNeo Model transformer with a sequence classification head on top (linear layer). + + [`GPTNeoForSequenceClassification`] uses the last token in order to do the classification, as other causal models + (e.g. GPT-1) do. + + Since it does classification on the last token, it requires to know the position of the last token. If a + `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If + no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the + padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in + each row of the batch). 
+ """, + GPT_NEO_START_DOCSTRING, +) +class GPTNeoForSequenceClassification(GPTNeoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.transformer = GPTNeoModel(config) + self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutputWithPast, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + past_key_values: Optional[Tuple[torch.FloatTensor]] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + labels: Optional[torch.Tensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + hidden_states = transformer_outputs[0] + logits = self.score(hidden_states) + + if input_ids is not None: + batch_size, sequence_length = input_ids.shape[:2] + else: + batch_size, sequence_length = inputs_embeds.shape[:2] + + if self.config.pad_token_id is None and batch_size != 1: + raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.") + if self.config.pad_token_id is None: + sequence_lengths = -1 + else: + if input_ids is not None: + # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility + sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 + sequence_lengths = sequence_lengths % input_ids.shape[-1] + sequence_lengths = sequence_lengths.to(logits.device) + else: + sequence_lengths = -1 + logger.warning( + f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be " + "unexpected if using padding tokens in conjunction with `inputs_embeds.`" + ) + + pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths] + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(pooled_logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(pooled_logits, labels) + if not return_dict: + output = (pooled_logits,) + transformer_outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutputWithPast( + loss=loss, + logits=pooled_logits, + past_key_values=transformer_outputs.past_key_values, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + GPT Neo model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + GPT_NEO_START_DOCSTRING, +) +class GPTNeoForTokenClassification(GPTNeoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.transformer = GPTNeoModel(config) + self.dropout = nn.Dropout(config.classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING) + @add_code_sample_docstrings( + checkpoint="EleutherAI/gpt-neo-125m", + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + expected_loss=0.25, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + transformer_outputs = self.transformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + hidden_states = transformer_outputs[0] + hidden_states = self.dropout(hidden_states) + logits = self.classifier(hidden_states) + + loss = None + if labels is not None: + labels = labels.to(logits.device) + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + transformer_outputs[2:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=transformer_outputs.hidden_states, + attentions=transformer_outputs.attentions, + ) + + +@add_start_docstrings( + """ + The GPT-Neo Model transformer with a span classification head on top for extractive question-answering tasks like + SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + GPT_NEO_START_DOCSTRING, +) +class GPTNeoForQuestionAnswering(GPTNeoPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.transformer = GPTNeoModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, 2) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(GPT_NEO_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + real_checkpoint=_CHECKPOINT_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.transformer( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[2:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + )