applied-ai-018 commited on
Commit
1946a6b
·
verified ·
1 Parent(s): ab8aba3

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. lm-evaluation-harness/tests/testdata/anagrams1-v0-greedy_until +1 -0
  3. lm-evaluation-harness/tests/testdata/anli_r3-v0-loglikelihood +1 -0
  4. lm-evaluation-harness/tests/testdata/anli_r3-v0-res.json +1 -0
  5. lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/arithmetic_5ds-v0-loglikelihood +1 -0
  7. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-loglikelihood +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_drop_argument-v0-loglikelihood +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_existential_there_subject_raising-v0-res.json +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-loglikelihood +1 -0
  11. lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-loglikelihood +1 -0
  12. lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-loglikelihood +1 -0
  13. lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-loglikelihood +1 -0
  15. lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_scope-v0-res.json +1 -0
  16. lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-loglikelihood +1 -0
  17. lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_2-v0-loglikelihood +1 -0
  18. lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-res.json +1 -0
  19. lm-evaluation-harness/tests/testdata/crows_pairs_english_age-v0-res.json +1 -0
  20. lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-res.json +1 -0
  21. lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-res.json +1 -0
  22. lm-evaluation-harness/tests/testdata/drop-v0-res.json +1 -0
  23. lm-evaluation-harness/tests/testdata/drop-v1-greedy_until +1 -0
  24. lm-evaluation-harness/tests/testdata/headqa_es-v0-res.json +1 -0
  25. lm-evaluation-harness/tests/testdata/hendrycksTest-college_medicine-v0-loglikelihood +1 -0
  26. lm-evaluation-harness/tests/testdata/hendrycksTest-global_facts-v0-res.json +1 -0
  27. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_chemistry-v0-loglikelihood +1 -0
  28. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-res.json +1 -0
  29. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_us_history-v0-loglikelihood +1 -0
  30. lm-evaluation-harness/tests/testdata/hendrycksTest-professional_accounting-v0-res.json +1 -0
  31. lm-evaluation-harness/tests/testdata/hendrycksTest-public_relations-v0-loglikelihood +1 -0
  32. lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-greedy_until +1 -0
  33. lm-evaluation-harness/tests/testdata/math_precalc-v0-greedy_until +1 -0
  34. lm-evaluation-harness/tests/testdata/mc_taco-v0-loglikelihood +1 -0
  35. lm-evaluation-harness/tests/testdata/mutual_plus-v1-res.json +1 -0
  36. lm-evaluation-harness/tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling +1 -0
  37. lm-evaluation-harness/tests/testdata/qa4mre_2011-v0-loglikelihood +1 -0
  38. lm-evaluation-harness/tests/testdata/squad2-v0-loglikelihood +1 -0
  39. lm-evaluation-harness/tests/testdata/wmt16-en-de-v0-res.json +1 -0
  40. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so +3 -0
  41. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 +3 -0
  42. venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so +3 -0
  43. venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/convert_bloom_original_checkpoint_to_pytorch.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/transformers/models/bloom/modeling_bloom.py +1243 -0
  50. venv/lib/python3.10/site-packages/transformers/models/codegen/__init__.py +73 -0
.gitattributes CHANGED
@@ -83,3 +83,6 @@ venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs d
83
  venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
84
  venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text
85
  venv/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 
 
 
 
83
  venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
84
  venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text
85
  venv/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
86
+ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so filter=lfs diff=lfs merge=lfs -text
87
+ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
88
+ venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so filter=lfs diff=lfs merge=lfs -text
lm-evaluation-harness/tests/testdata/anagrams1-v0-greedy_until ADDED
@@ -0,0 +1 @@
 
 
1
+ 7c0c5246d3f751f39119a5629ac1d4b2c6fd2a315f78d6de9b2c387e24e3fef1
lm-evaluation-harness/tests/testdata/anli_r3-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 6b6e5c6a794f2fbff78b7aa24fe0c90156039334bbd1cb34f7af9fc6e6183845
lm-evaluation-harness/tests/testdata/anli_r3-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"anli_r3": {"acc": 0.31916666666666665, "acc_stderr": 0.01346230971200514}}, "versions": {"anli_r3": 0}}
lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"arithmetic_2ds": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_2ds": 0}}
lm-evaluation-harness/tests/testdata/arithmetic_5ds-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 2888d6d098a5ef8c1e7f0d8295ba80826e2e04e431f57508dfb71d53e1cd4604
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 2df8cc7f17089f7e8c7d974dcb324c809d30ef059a5be22aed6b69f44230809f
lm-evaluation-harness/tests/testdata/blimp_drop_argument-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 616109e63f162dcd31a632943e7ef0c9e0431afeb179e83e9b04b39007b16f5b
lm-evaluation-harness/tests/testdata/blimp_existential_there_subject_raising-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_existential_there_subject_raising": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_subject_raising": 0}}
lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ fdb688ac6259bb65d234ef0a36e9a9ee449f9608f633b12e1943b462aead8e17
lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ cd68adb65c891d672e22bf53c054b2083ab08bc1da43951732b409c942d14bc7
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 38454befedcf1f3f6ef27d3bef9ccfdfb3e94a7ab32d86a63493a920d2d50093
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_3-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_principle_A_domain_3": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_3": 0}}
lm-evaluation-harness/tests/testdata/blimp_principle_A_reconstruction-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 894efedfd8750d5b8de6157f9b2ed2b51b5290d3a78ea9b041fc62d34e96efbc
lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_scope-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_sentential_negation_npi_scope": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_negation_npi_scope": 0}}
lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 80f5f98fad26240de2767fe58c4b18d864df41cbfa76f06c84c3fce9f14f4833
lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 59c20ff0f632cf42afc74ecc682cf92e5e740417b01e6cf9a610a3bc544d2ea5
lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"blimp_wh_vs_that_with_gap_long_distance": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_with_gap_long_distance": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_age-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"crows_pairs_english_age": {"likelihood_difference": 0.3160680928470684, "likelihood_difference_stderr": 0.02397758321605678, "pct_stereotype": 0.43956043956043955, "pct_stereotype_stderr": 0.05231815698566189}}, "versions": {"crows_pairs_english_age": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"crows_pairs_english_autre": {"likelihood_difference": 0.3424336593343321, "likelihood_difference_stderr": 0.08588068996335849, "pct_stereotype": 0.2727272727272727, "pct_stereotype_stderr": 0.14083575804390605}}, "versions": {"crows_pairs_english_autre": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"crows_pairs_english_sexual_orientation": {"likelihood_difference": 0.31947594049467243, "likelihood_difference_stderr": 0.024404952720497735, "pct_stereotype": 0.43010752688172044, "pct_stereotype_stderr": 0.051616798980291805}}, "versions": {"crows_pairs_english_sexual_orientation": 0}}
lm-evaluation-harness/tests/testdata/drop-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"drop": {"em": 0.0, "em_stderr": 0.0, "f1": 0.0, "f1_stderr": 0.0}}, "versions": {"drop": 0}}
lm-evaluation-harness/tests/testdata/drop-v1-greedy_until ADDED
@@ -0,0 +1 @@
 
 
1
+ a670f911ab2999d72db15f534b22703d19e7837edbda4f9f199ad587f7aae6b2
lm-evaluation-harness/tests/testdata/headqa_es-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"headqa_es": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_es": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-college_medicine-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ dd6e0a9be1407890e9f8cd4434fb6aa4752ab3d2473837fd465ad99f60ad685e
lm-evaluation-harness/tests/testdata/hendrycksTest-global_facts-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-global_facts": {"acc": 0.23, "acc_norm": 0.23, "acc_norm_stderr": 0.04229525846816507, "acc_stderr": 0.04229525846816507}}, "versions": {"hendrycksTest-global_facts": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_chemistry-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ f4f338e45415c4b5ee7f1d249155bcd910c8401bd1436760a5ec61cb6bb211b6
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-high_school_government_and_politics": {"acc": 0.24352331606217617, "acc_norm": 0.23834196891191708, "acc_norm_stderr": 0.03074890536390988, "acc_stderr": 0.030975436386845436}}, "versions": {"hendrycksTest-high_school_government_and_politics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_us_history-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 8c65c1a28330dd001d395ac11f1bb80c3b33f5935f503e74067aef6e9e1d9d9b
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_accounting-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"hendrycksTest-professional_accounting": {"acc": 0.2553191489361702, "acc_norm": 0.26595744680851063, "acc_norm_stderr": 0.026358065698880582, "acc_stderr": 0.026011992930902006}}, "versions": {"hendrycksTest-professional_accounting": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-public_relations-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ ab70f500cf24e876f6ae6bdc27525a1d6074fa9b6ea97770255d9fc2559b36ff
lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-greedy_until ADDED
@@ -0,0 +1 @@
 
 
1
+ 2aa9ae43ee9dbb2457525247d7b65358632c5eaa9cbfc40cf95a4f17f5d942ad
lm-evaluation-harness/tests/testdata/math_precalc-v0-greedy_until ADDED
@@ -0,0 +1 @@
 
 
1
+ bc834b06fd79473ca6fe38a51b714aad0bf0478c1b0eec787eca34dbdf69cb71
lm-evaluation-harness/tests/testdata/mc_taco-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 1811808ef05afd5f30ffc3471622a3dd7a1b681b17a2f7616695ad6b2a45943c
lm-evaluation-harness/tests/testdata/mutual_plus-v1-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"mutual_plus": {"mrr": 0.5275583145221953, "mrr_stderr": 0.009940894824430708, "r@1": 0.26297968397291194, "r@1_stderr": 0.01479889176605113, "r@2": 0.5, "r@2_stderr": 0.01680731613632036}}, "versions": {"mutual_plus": 1}}
lm-evaluation-harness/tests/testdata/pile_opensubtitles-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
 
 
1
+ 0f1c23a1f4ddec0c2b1ff34de8d1505b0eb9e2868d8edbcc1b6de13d02f32036
lm-evaluation-harness/tests/testdata/qa4mre_2011-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 0d09f17c65768e797633494d2d218e4e46a26f718cab8b0bf3d156b073a8c437
lm-evaluation-harness/tests/testdata/squad2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
 
 
1
+ 287e87cc6878debcc80d9b6df4e2d0a74ed29068e0e0a80906c8441843a17cee
lm-evaluation-harness/tests/testdata/wmt16-en-de-v0-res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"results": {"wmt16-en-de": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.010909486120840577, "chrf_stderr": 0.000122611124711072, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt16-en-de": 0}}
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcheckpoint.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1626ff119582bca46605bc6d49769ab75314b9993dd647bd64a90dec747bc843
3
+ size 1534104
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:abc63100e9cf516b8ed1fa25354ae53dbfe8df4838ac525d8d738332c2198dc2
3
+ size 7419504
venv/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libnvperf_target.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa2587c8d211fbc85e8b88cca0bcebe78c8cc40c81b0c3763ce57ac9e63f0669
3
+ size 5895416
venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/configuration_bloom.cpython-310.pyc ADDED
Binary file (8.79 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/convert_bloom_original_checkpoint_to_pytorch.cpython-310.pyc ADDED
Binary file (6.27 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_bloom.cpython-310.pyc ADDED
Binary file (35.4 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/modeling_flax_bloom.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bloom/__pycache__/tokenization_bloom_fast.cpython-310.pyc ADDED
Binary file (5.7 kB). View file
 
venv/lib/python3.10/site-packages/transformers/models/bloom/modeling_bloom.py ADDED
@@ -0,0 +1,1243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2022 HuggingFace Inc. team and BigScience workshop.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PyTorch BLOOM model."""
16
+
17
+ import math
18
+ import warnings
19
+ from typing import Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, LayerNorm, MSELoss
25
+ from torch.nn import functional as F
26
+
27
+ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
28
+ from ...modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
29
+ from ...modeling_outputs import (
30
+ BaseModelOutputWithPastAndCrossAttentions,
31
+ CausalLMOutputWithCrossAttentions,
32
+ QuestionAnsweringModelOutput,
33
+ SequenceClassifierOutputWithPast,
34
+ TokenClassifierOutput,
35
+ )
36
+ from ...modeling_utils import PreTrainedModel
37
+ from ...utils import logging
38
+ from .configuration_bloom import BloomConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ _CHECKPOINT_FOR_DOC = "bigscience/bloom-560m"
44
+ _CONFIG_FOR_DOC = "BloomConfig"
45
+
46
+
47
+ from ..deprecated._archive_maps import BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
48
+
49
+
50
+ def build_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
51
+ """
52
+ Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it
53
+ relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value
54
+ `softmax(l+a) = softmax(l)`. Based on
55
+ https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
56
+ TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.
57
+
58
+ Args:
59
+ Returns tensor shaped (batch_size * num_heads, 1, max_seq_len)
60
+ attention_mask (`torch.Tensor`):
61
+ Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
62
+ num_heads (`int`, *required*):
63
+ number of heads
64
+ dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`):
65
+ dtype of the output tensor
66
+ """
67
+ batch_size, seq_length = attention_mask.shape
68
+ closest_power_of_2 = 2 ** math.floor(math.log2(num_heads))
69
+ base = torch.tensor(
70
+ 2 ** (-(2 ** -(math.log2(closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
71
+ )
72
+ powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
73
+ slopes = torch.pow(base, powers)
74
+
75
+ if closest_power_of_2 != num_heads:
76
+ extra_base = torch.tensor(
77
+ 2 ** (-(2 ** -(math.log2(2 * closest_power_of_2) - 3))), device=attention_mask.device, dtype=torch.float32
78
+ )
79
+ num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
80
+ extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
81
+ slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
82
+
83
+ # Note: alibi will added to the attention bias that will be applied to the query, key product of attention
84
+ # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
85
+ # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
86
+ # => the query_length dimension will then be broadcasted correctly
87
+ # This is more or less identical to T5's relative position bias:
88
+ # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
89
+ arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
90
+ alibi = slopes[..., None] * arange_tensor
91
+ return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
92
+
93
+
94
+ def dropout_add(x: torch.Tensor, residual: torch.Tensor, prob: float, training: bool) -> torch.Tensor:
95
+ """
96
+ Dropout add function
97
+
98
+ Args:
99
+ x (`torch.tensor`, *required*):
100
+ input tensor
101
+ residual (`torch.tensor`, *required*):
102
+ residual tensor
103
+ prob (`float`, *required*):
104
+ dropout probability
105
+ training (`bool`, *required*):
106
+ training mode
107
+ """
108
+ out = F.dropout(x, p=prob, training=training)
109
+ out = residual + out
110
+ return out
111
+
112
+
113
+ def bloom_gelu_forward(x: torch.Tensor) -> torch.Tensor:
114
+ """
115
+ Custom bias GELU function. Adapted from Megatron-DeepSpeed code. Here we use a simple implementation (inference) to
116
+ make the model jitable.
117
+
118
+ Args:
119
+ x (`torch.tensor`, *required*):
120
+ input hidden states
121
+ """
122
+ return x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))
123
+
124
+
125
+ def bloom_gelu_back(g: torch.Tensor, x: torch.Tensor) -> torch.Tensor:
126
+ """
127
+ gradient of tanh approximation of gelu gradient of actual gelu is: 0.5 * (1. + torch.erf(x * 0.70710678)) +
128
+ 0.3989423 * x * torch.exp(-0.5 * x * x)
129
+
130
+ Args:
131
+ g (`torch.tensor`, *required*):
132
+ gradient output tensor
133
+ x (`torch.tensor`, *required*):
134
+ input tensor
135
+ """
136
+ x = x[0] # x is a tuple of 1 element, needs to unpack it first
137
+ tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
138
+ # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
139
+ ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (1 + tanh_out)
140
+ return ff * g
141
+
142
+
143
+ class GeLUFunction(torch.autograd.Function):
144
+ @staticmethod
145
+ def forward(ctx, input: torch.Tensor) -> torch.Tensor:
146
+ ctx.save_for_backward(input)
147
+ return bloom_gelu_forward(input)
148
+
149
+ @staticmethod
150
+ def backward(ctx, grad_output: torch.Tensor) -> torch.Tensor:
151
+ input = ctx.saved_tensors
152
+ tmp = bloom_gelu_back(grad_output, input)
153
+ return tmp
154
+
155
+
156
+ class BloomGelu(nn.Module):
157
+ """
158
+ BloomBiasGelu wrapper function that make use of the simple function on inference mode to make the model
159
+ torchscriptable and use the autograd function in training mode to get the accurate results of the gradients Partly
160
+ copied from Megatron-DeepSpeed code and adapted for our needs
161
+
162
+ See here why autograd functions are not torchscriptable: https://github.com/pytorch/pytorch/issues/22329
163
+ """
164
+
165
+ def __init__(self):
166
+ super().__init__()
167
+
168
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
169
+ if self.training:
170
+ return GeLUFunction.apply(x)
171
+ else:
172
+ return bloom_gelu_forward(x)
173
+
174
+
175
+ class BloomAttention(nn.Module):
176
+ def __init__(self, config: BloomConfig):
177
+ super().__init__()
178
+
179
+ self.pretraining_tp = config.pretraining_tp
180
+ self.slow_but_exact = config.slow_but_exact
181
+
182
+ self.hidden_size = config.hidden_size
183
+ self.num_heads = config.n_head
184
+ self.head_dim = self.hidden_size // self.num_heads
185
+ self.split_size = self.hidden_size
186
+ self.hidden_dropout = config.hidden_dropout
187
+
188
+ if self.head_dim * self.num_heads != self.hidden_size:
189
+ raise ValueError(
190
+ f"`hidden_size` must be divisible by num_heads (got `hidden_size`: {self.hidden_size} and `num_heads`:"
191
+ f" {self.num_heads})."
192
+ )
193
+
194
+ # Layer-wise attention scaling
195
+ self.inv_norm_factor = 1.0 / math.sqrt(self.head_dim)
196
+ self.beta = 1.0
197
+
198
+ self.query_key_value = nn.Linear(self.hidden_size, 3 * self.hidden_size, bias=True)
199
+ self.dense = nn.Linear(self.hidden_size, self.hidden_size)
200
+ self.attention_dropout = nn.Dropout(config.attention_dropout)
201
+
202
+ def _split_heads(self, fused_qkv: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
203
+ """
204
+ Split the last dimension into (num_heads, head_dim) without making any copies, results share same memory
205
+ storage as `fused_qkv`
206
+
207
+ Args:
208
+ fused_qkv (`torch.tensor`, *required*): [batch_size, seq_length, num_heads * 3 * head_dim]
209
+
210
+ Returns:
211
+ query: [batch_size, seq_length, num_heads, head_dim] key: [batch_size, seq_length, num_heads, head_dim]
212
+ value: [batch_size, seq_length, num_heads, head_dim]
213
+ """
214
+ batch_size, seq_length, three_times_hidden_size = fused_qkv.shape
215
+ fused_qkv = fused_qkv.view(batch_size, seq_length, self.num_heads, 3, self.head_dim)
216
+ return fused_qkv[..., 0, :], fused_qkv[..., 1, :], fused_qkv[..., 2, :]
217
+
218
+ def _merge_heads(self, x: torch.Tensor) -> torch.Tensor:
219
+ """
220
+ Merge heads together over the last dimension
221
+
222
+ Args:
223
+ x (`torch.tensor`, *required*): [batch_size * num_heads, seq_length, head_dim]
224
+
225
+ Returns:
226
+ torch.tensor: [batch_size, seq_length, num_heads * head_dim]
227
+ """
228
+ # What we want to achieve is:
229
+ # batch_size * num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads * head_dim
230
+ batch_size_and_num_heads, seq_length, _ = x.shape
231
+ batch_size = batch_size_and_num_heads // self.num_heads
232
+
233
+ # First view to decompose the batch size
234
+ # batch_size * num_heads, seq_length, head_dim -> batch_size, num_heads, seq_length, head_dim
235
+ x = x.view(batch_size, self.num_heads, seq_length, self.head_dim)
236
+
237
+ # batch_size, num_heads, seq_length, head_dim -> batch_size, seq_length, num_heads, head_dim
238
+ x = x.permute(0, 2, 1, 3)
239
+
240
+ # batch_size, seq_length, num_heads, head_dim -> batch_size, seq_length, num_heads * head_dim
241
+ return x.reshape(batch_size, seq_length, self.num_heads * self.head_dim)
242
+
243
+ def forward(
244
+ self,
245
+ hidden_states: torch.Tensor,
246
+ residual: torch.Tensor,
247
+ alibi: torch.Tensor,
248
+ attention_mask: torch.Tensor,
249
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
250
+ head_mask: Optional[torch.Tensor] = None,
251
+ use_cache: bool = False,
252
+ output_attentions: bool = False,
253
+ ):
254
+ fused_qkv = self.query_key_value(hidden_states) # [batch_size, seq_length, 3 x hidden_size]
255
+
256
+ # 3 x [batch_size, seq_length, num_heads, head_dim]
257
+ (query_layer, key_layer, value_layer) = self._split_heads(fused_qkv)
258
+
259
+ batch_size, q_length, _, _ = query_layer.shape
260
+
261
+ query_layer = query_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim)
262
+ key_layer = key_layer.permute(0, 2, 3, 1).reshape(batch_size * self.num_heads, self.head_dim, q_length)
263
+ value_layer = value_layer.transpose(1, 2).reshape(batch_size * self.num_heads, q_length, self.head_dim)
264
+ if layer_past is not None:
265
+ past_key, past_value = layer_past
266
+ # concatenate along seq_length dimension:
267
+ # - key: [batch_size * self.num_heads, head_dim, kv_length]
268
+ # - value: [batch_size * self.num_heads, kv_length, head_dim]
269
+ key_layer = torch.cat((past_key, key_layer), dim=2)
270
+ value_layer = torch.cat((past_value, value_layer), dim=1)
271
+
272
+ _, _, kv_length = key_layer.shape
273
+
274
+ if use_cache is True:
275
+ present = (key_layer, value_layer)
276
+ else:
277
+ present = None
278
+
279
+ # [batch_size * num_heads, q_length, kv_length]
280
+ # we use `torch.Tensor.baddbmm` instead of `torch.baddbmm` as the latter isn't supported by TorchScript v1.11
281
+ matmul_result = alibi.baddbmm(
282
+ batch1=query_layer,
283
+ batch2=key_layer,
284
+ beta=self.beta,
285
+ alpha=self.inv_norm_factor,
286
+ )
287
+
288
+ # change view to [batch_size, num_heads, q_length, kv_length]
289
+ attention_scores = matmul_result.view(batch_size, self.num_heads, q_length, kv_length)
290
+
291
+ # cast attention scores to fp32, compute scaled softmax and cast back to initial dtype - [batch_size, num_heads, q_length, kv_length]
292
+ input_dtype = attention_scores.dtype
293
+ # `float16` has a minimum value of -65504.0, whereas `bfloat16` and `float32` have a minimum value of `-3.4e+38`
294
+ if input_dtype == torch.float16:
295
+ attention_scores = attention_scores.to(torch.float)
296
+ attn_weights = torch.masked_fill(attention_scores, attention_mask, torch.finfo(attention_scores.dtype).min)
297
+ attention_probs = F.softmax(attn_weights, dim=-1, dtype=torch.float32).to(input_dtype)
298
+
299
+ # [batch_size, num_heads, q_length, kv_length]
300
+ attention_probs = self.attention_dropout(attention_probs)
301
+
302
+ if head_mask is not None:
303
+ attention_probs = attention_probs * head_mask
304
+
305
+ # change view [batch_size x num_heads, q_length, kv_length]
306
+ attention_probs_reshaped = attention_probs.view(batch_size * self.num_heads, q_length, kv_length)
307
+
308
+ # matmul: [batch_size * num_heads, q_length, head_dim]
309
+ context_layer = torch.bmm(attention_probs_reshaped, value_layer)
310
+
311
+ # change view [batch_size, q_length, num_heads * head_dim]
312
+ context_layer = self._merge_heads(context_layer)
313
+
314
+ # aggregate results across tp ranks. See here: https://github.com/pytorch/pytorch/issues/76232
315
+ if self.pretraining_tp > 1 and self.slow_but_exact:
316
+ slices = self.hidden_size / self.pretraining_tp
317
+ output_tensor = torch.zeros_like(context_layer)
318
+ for i in range(self.pretraining_tp):
319
+ output_tensor = output_tensor + F.linear(
320
+ context_layer[:, :, int(i * slices) : int((i + 1) * slices)],
321
+ self.dense.weight[:, int(i * slices) : int((i + 1) * slices)],
322
+ )
323
+ else:
324
+ output_tensor = self.dense(context_layer)
325
+
326
+ output_tensor = dropout_add(output_tensor, residual, self.hidden_dropout, self.training)
327
+
328
+ outputs = (output_tensor, present)
329
+ if output_attentions:
330
+ outputs += (attention_probs,)
331
+
332
+ return outputs
333
+
334
+
335
+ class BloomMLP(nn.Module):
336
+ def __init__(self, config: BloomConfig):
337
+ super().__init__()
338
+ hidden_size = config.hidden_size
339
+
340
+ self.pretraining_tp = config.pretraining_tp
341
+ self.slow_but_exact = config.slow_but_exact
342
+ self.dense_h_to_4h = nn.Linear(hidden_size, 4 * hidden_size)
343
+ self.gelu_impl = BloomGelu()
344
+ self.dense_4h_to_h = nn.Linear(4 * hidden_size, hidden_size)
345
+ self.hidden_dropout = config.hidden_dropout
346
+
347
+ def forward(self, hidden_states: torch.Tensor, residual: torch.Tensor) -> torch.Tensor:
348
+ hidden_states = self.gelu_impl(self.dense_h_to_4h(hidden_states))
349
+
350
+ if self.pretraining_tp > 1 and self.slow_but_exact:
351
+ intermediate_output = torch.zeros_like(residual)
352
+ slices = self.dense_4h_to_h.weight.shape[-1] / self.pretraining_tp
353
+ for i in range(self.pretraining_tp):
354
+ intermediate_output = intermediate_output + F.linear(
355
+ hidden_states[:, :, int(i * slices) : int((i + 1) * slices)],
356
+ self.dense_4h_to_h.weight[:, int(i * slices) : int((i + 1) * slices)],
357
+ )
358
+ else:
359
+ intermediate_output = self.dense_4h_to_h(hidden_states)
360
+
361
+ output = dropout_add(intermediate_output, residual, self.hidden_dropout, self.training)
362
+
363
+ return output
364
+
365
+
366
+ class BloomBlock(nn.Module):
367
+ def __init__(self, config: BloomConfig):
368
+ super().__init__()
369
+ hidden_size = config.hidden_size
370
+
371
+ self.input_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
372
+ self.num_heads = config.n_head
373
+ self.self_attention = BloomAttention(config)
374
+ self.post_attention_layernorm = LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
375
+
376
+ self.mlp = BloomMLP(config)
377
+
378
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
379
+ self.hidden_dropout = config.hidden_dropout
380
+
381
+ def forward(
382
+ self,
383
+ hidden_states: torch.Tensor,
384
+ alibi: torch.Tensor,
385
+ attention_mask: torch.Tensor,
386
+ layer_past: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,
387
+ head_mask: Optional[torch.Tensor] = None,
388
+ use_cache: bool = False,
389
+ output_attentions: bool = False,
390
+ ):
391
+ # hidden_states: [batch_size, seq_length, hidden_size]
392
+
393
+ # Layer norm at the beginning of the transformer layer.
394
+ layernorm_output = self.input_layernorm(hidden_states)
395
+
396
+ # Layer norm post the self attention.
397
+ if self.apply_residual_connection_post_layernorm:
398
+ residual = layernorm_output
399
+ else:
400
+ residual = hidden_states
401
+
402
+ # Self attention.
403
+ attn_outputs = self.self_attention(
404
+ layernorm_output,
405
+ residual,
406
+ layer_past=layer_past,
407
+ attention_mask=attention_mask,
408
+ alibi=alibi,
409
+ head_mask=head_mask,
410
+ use_cache=use_cache,
411
+ output_attentions=output_attentions,
412
+ )
413
+
414
+ attention_output = attn_outputs[0]
415
+
416
+ outputs = attn_outputs[1:]
417
+
418
+ layernorm_output = self.post_attention_layernorm(attention_output)
419
+
420
+ # Get residual
421
+ if self.apply_residual_connection_post_layernorm:
422
+ residual = layernorm_output
423
+ else:
424
+ residual = attention_output
425
+
426
+ # MLP.
427
+ output = self.mlp(layernorm_output, residual)
428
+
429
+ if use_cache:
430
+ outputs = (output,) + outputs
431
+ else:
432
+ outputs = (output,) + outputs[1:]
433
+
434
+ return outputs # hidden_states, present, attentions
435
+
436
+
437
+ class BloomPreTrainedModel(PreTrainedModel):
438
+ config_class = BloomConfig
439
+ base_model_prefix = "transformer"
440
+ supports_gradient_checkpointing = True
441
+ _no_split_modules = ["BloomBlock"]
442
+ _skip_keys_device_placement = "past_key_values"
443
+
444
+ def __init__(self, *inputs, **kwargs):
445
+ super().__init__(*inputs, **kwargs)
446
+
447
+ def _init_weights(self, module: nn.Module):
448
+ """Initialize the weights."""
449
+ if isinstance(module, nn.Linear):
450
+ # Slightly different from the TF version which uses truncated_normal for initialization
451
+ # cf https://github.com/pytorch/pytorch/pull/5617
452
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
453
+ if module.bias is not None:
454
+ module.bias.data.zero_()
455
+ elif isinstance(module, nn.Embedding):
456
+ module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
457
+ if module.padding_idx is not None:
458
+ module.weight.data[module.padding_idx].zero_()
459
+ elif isinstance(module, LayerNorm):
460
+ module.bias.data.zero_()
461
+ module.weight.data.fill_(1.0)
462
+
463
+ @staticmethod
464
+ def _convert_to_standard_cache(
465
+ past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]], batch_size: int
466
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
467
+ """
468
+ Standardizes the format of the cache so as to match most implementations, i.e. to tuple(tuple([batch_size,
469
+ num_heads, ...]))
470
+ """
471
+ batch_size_times_num_heads, head_dim, seq_length = past_key_value[0][0].shape
472
+ num_heads = batch_size_times_num_heads // batch_size
473
+ # key: [batch_size * num_heads, head_dim, seq_length] -> [batch_size, num_heads, head_dim, seq_length]
474
+ # value: [batch_size * num_heads, seq_length, head_dim] -> [batch_size, num_heads, seq_length, head_dim]
475
+ return tuple(
476
+ (
477
+ layer_past[0].view(batch_size, num_heads, head_dim, seq_length),
478
+ layer_past[1].view(batch_size, num_heads, seq_length, head_dim),
479
+ )
480
+ for layer_past in past_key_value
481
+ )
482
+
483
+ @staticmethod
484
+ def _convert_to_bloom_cache(
485
+ past_key_value: Tuple[Tuple[torch.Tensor, torch.Tensor]],
486
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor]]:
487
+ """
488
+ Converts the cache to the format expected by Bloom, i.e. to tuple(tuple([batch_size * num_heads, ...]))
489
+ """
490
+ batch_size, num_heads, head_dim, seq_length = past_key_value[0][0].shape
491
+ batch_size_times_num_heads = batch_size * num_heads
492
+ # key: [batch_size, num_heads, head_dim, seq_length] -> [batch_size * num_heads, head_dim, seq_length]
493
+ # value: [batch_size, num_heads, seq_length, head_dim] -> [batch_size * num_heads, seq_length, head_dim]
494
+ return tuple(
495
+ (
496
+ layer_past[0].view(batch_size_times_num_heads, head_dim, seq_length),
497
+ layer_past[1].view(batch_size_times_num_heads, seq_length, head_dim),
498
+ )
499
+ for layer_past in past_key_value
500
+ )
501
+
502
+
503
+ BLOOM_START_DOCSTRING = r"""
504
+
505
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
506
+ library implements for all its model (such as downloading or saving, resizing the input embeddings etc.)
507
+
508
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
509
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
510
+ and behavior.
511
+
512
+ Parameters:
513
+ config ([`BloomConfig`]): Model configuration class with all the parameters of the model.
514
+ Initializing with a config file does not load the weights associated with the model, only the
515
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
516
+ """
517
+
518
+ BLOOM_INPUTS_DOCSTRING = r"""
519
+ Args:
520
+ input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
521
+ `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0][0].shape[2]`
522
+ (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary.
523
+
524
+ If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
525
+ `input_ids`.
526
+
527
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
528
+ [`PreTrainedTokenizer.__call__`] for details.
529
+
530
+ [What are input IDs?](../glossary#input-ids)
531
+ past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`):
532
+ Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see
533
+ `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have
534
+ their past given to this model should not be passed as `input_ids` as they have already been computed.
535
+
536
+ Each element of `past_key_values` is a tuple (past_key, past_value):
537
+ - past_key: [batch_size * num_heads, head_dim, kv_length]
538
+ - past_value: [batch_size * num_heads, kv_length, head_dim]
539
+ attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
540
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
541
+
542
+ - 1 for tokens that are **not masked**,
543
+ - 0 for tokens that are **masked**.
544
+
545
+ [What are attention masks?](../glossary#attention-mask)
546
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
547
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
548
+
549
+ - 1 indicates the head is **not masked**,
550
+ - 0 indicates the head is **masked**.
551
+
552
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
553
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
554
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
555
+ model's internal embedding lookup matrix.
556
+
557
+ If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see
558
+ `past_key_values`).
559
+ use_cache (`bool`, *optional*):
560
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
561
+ `past_key_values`).
562
+ output_attentions (`bool`, *optional*):
563
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
564
+ tensors for more detail.
565
+ output_hidden_states (`bool`, *optional*):
566
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
567
+ more detail.
568
+ return_dict (`bool`, *optional*):
569
+ Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
570
+ """
571
+
572
+
573
+ @add_start_docstrings(
574
+ "The bare Bloom Model transformer outputting raw hidden-states without any specific head on top.",
575
+ BLOOM_START_DOCSTRING,
576
+ )
577
+ class BloomModel(BloomPreTrainedModel):
578
+ def __init__(self, config: BloomConfig):
579
+ super().__init__(config)
580
+
581
+ self.embed_dim = config.hidden_size
582
+ self.num_heads = config.n_head
583
+
584
+ # Embedding + LN Embedding
585
+ self.word_embeddings = nn.Embedding(config.vocab_size, self.embed_dim)
586
+ self.word_embeddings_layernorm = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
587
+
588
+ # Transformer blocks
589
+ self.h = nn.ModuleList([BloomBlock(config) for _ in range(config.num_hidden_layers)])
590
+
591
+ # Final Layer Norm
592
+ self.ln_f = LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
593
+
594
+ self.gradient_checkpointing = False
595
+
596
+ # Initialize weights and apply final processing
597
+ self.post_init()
598
+
599
+ def build_alibi_tensor(self, attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
600
+ return build_alibi_tensor(attention_mask, num_heads, dtype)
601
+
602
+ def get_input_embeddings(self):
603
+ return self.word_embeddings
604
+
605
+ def set_input_embeddings(self, new_embeddings: torch.Tensor):
606
+ self.word_embeddings = new_embeddings
607
+
608
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
609
+ @add_code_sample_docstrings(
610
+ checkpoint=_CHECKPOINT_FOR_DOC,
611
+ output_type=BaseModelOutputWithPastAndCrossAttentions,
612
+ config_class=_CONFIG_FOR_DOC,
613
+ )
614
+ def forward(
615
+ self,
616
+ input_ids: Optional[torch.LongTensor] = None,
617
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
618
+ attention_mask: Optional[torch.Tensor] = None,
619
+ head_mask: Optional[torch.LongTensor] = None,
620
+ inputs_embeds: Optional[torch.LongTensor] = None,
621
+ use_cache: Optional[bool] = None,
622
+ output_attentions: Optional[bool] = None,
623
+ output_hidden_states: Optional[bool] = None,
624
+ return_dict: Optional[bool] = None,
625
+ **deprecated_arguments,
626
+ ) -> Union[Tuple[torch.Tensor, ...], BaseModelOutputWithPastAndCrossAttentions]:
627
+ if deprecated_arguments.pop("position_ids", False) is not False:
628
+ # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
629
+ warnings.warn(
630
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
631
+ " passing `position_ids`.",
632
+ FutureWarning,
633
+ )
634
+ if len(deprecated_arguments) > 0:
635
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
636
+
637
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
638
+ output_hidden_states = (
639
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
640
+ )
641
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
642
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
643
+
644
+ if input_ids is not None and inputs_embeds is not None:
645
+ raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
646
+ elif input_ids is not None:
647
+ batch_size, seq_length = input_ids.shape
648
+ elif inputs_embeds is not None:
649
+ batch_size, seq_length, _ = inputs_embeds.shape
650
+ else:
651
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
652
+
653
+ if past_key_values is None:
654
+ past_key_values = tuple([None] * len(self.h))
655
+
656
+ # Prepare head mask if needed
657
+ # 1.0 in head_mask indicate we keep the head
658
+ # attention_probs has shape batch_size x num_heads x N x N
659
+ # head_mask has shape n_layer x batch x num_heads x N x N
660
+ head_mask = self.get_head_mask(head_mask, self.config.n_layer)
661
+
662
+ if inputs_embeds is None:
663
+ inputs_embeds = self.word_embeddings(input_ids)
664
+
665
+ hidden_states = self.word_embeddings_layernorm(inputs_embeds)
666
+
667
+ presents = () if use_cache else None
668
+ all_self_attentions = () if output_attentions else None
669
+ all_hidden_states = () if output_hidden_states else None
670
+
671
+ if self.gradient_checkpointing and self.training:
672
+ if use_cache:
673
+ logger.warning_once(
674
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
675
+ )
676
+ use_cache = False
677
+
678
+ # Compute alibi tensor: check build_alibi_tensor documentation
679
+ seq_length_with_past = seq_length
680
+ past_key_values_length = 0
681
+ if past_key_values[0] is not None:
682
+ past_key_values_length = past_key_values[0][0].shape[2]
683
+ seq_length_with_past = seq_length_with_past + past_key_values_length
684
+ if attention_mask is None:
685
+ attention_mask = torch.ones((batch_size, seq_length_with_past), device=hidden_states.device)
686
+ else:
687
+ attention_mask = attention_mask.to(hidden_states.device)
688
+
689
+ alibi = self.build_alibi_tensor(attention_mask, self.num_heads, dtype=hidden_states.dtype)
690
+
691
+ causal_mask = _prepare_4d_causal_attention_mask(
692
+ attention_mask,
693
+ input_shape=(batch_size, seq_length),
694
+ inputs_embeds=inputs_embeds,
695
+ past_key_values_length=past_key_values_length,
696
+ )
697
+ causal_mask = causal_mask.bool()
698
+
699
+ for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
700
+ if output_hidden_states:
701
+ all_hidden_states = all_hidden_states + (hidden_states,)
702
+
703
+ if self.gradient_checkpointing and self.training:
704
+ outputs = self._gradient_checkpointing_func(
705
+ block.__call__,
706
+ hidden_states,
707
+ alibi,
708
+ causal_mask,
709
+ layer_past,
710
+ head_mask[i],
711
+ use_cache,
712
+ output_attentions,
713
+ )
714
+ else:
715
+ outputs = block(
716
+ hidden_states,
717
+ layer_past=layer_past,
718
+ attention_mask=causal_mask,
719
+ head_mask=head_mask[i],
720
+ use_cache=use_cache,
721
+ output_attentions=output_attentions,
722
+ alibi=alibi,
723
+ )
724
+
725
+ hidden_states = outputs[0]
726
+ if use_cache is True:
727
+ presents = presents + (outputs[1],)
728
+
729
+ if output_attentions:
730
+ all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)
731
+
732
+ # Add last hidden state
733
+ hidden_states = self.ln_f(hidden_states)
734
+
735
+ if output_hidden_states:
736
+ all_hidden_states = all_hidden_states + (hidden_states,)
737
+
738
+ if not return_dict:
739
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
740
+
741
+ return BaseModelOutputWithPastAndCrossAttentions(
742
+ last_hidden_state=hidden_states,
743
+ past_key_values=presents,
744
+ hidden_states=all_hidden_states,
745
+ attentions=all_self_attentions,
746
+ )
747
+
748
+
749
+ @add_start_docstrings(
750
+ """
751
+ The Bloom Model transformer with a language modeling head on top (linear layer with weights tied to the input
752
+ embeddings).
753
+ """,
754
+ BLOOM_START_DOCSTRING,
755
+ )
756
+ class BloomForCausalLM(BloomPreTrainedModel):
757
+ _tied_weights_keys = ["lm_head.weight"]
758
+
759
+ def __init__(self, config: BloomConfig):
760
+ super().__init__(config)
761
+ self.transformer = BloomModel(config)
762
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
763
+
764
+ # Initialize weights and apply final processing
765
+ self.post_init()
766
+
767
+ def get_output_embeddings(self):
768
+ return self.lm_head
769
+
770
+ def set_output_embeddings(self, new_embeddings: torch.Tensor):
771
+ self.lm_head = new_embeddings
772
+
773
+ def prepare_inputs_for_generation(
774
+ self,
775
+ input_ids: torch.LongTensor,
776
+ past_key_values: Optional[torch.Tensor] = None,
777
+ attention_mask: Optional[torch.Tensor] = None,
778
+ inputs_embeds: Optional[torch.Tensor] = None,
779
+ **kwargs,
780
+ ) -> dict:
781
+ # only last tokens for input_ids if past is not None
782
+ if past_key_values is not None:
783
+ past_length = past_key_values[0][0].shape[2]
784
+
785
+ # Some generation methods already pass only the last input ID
786
+ if input_ids.shape[1] > past_length:
787
+ remove_prefix_length = past_length
788
+ else:
789
+ # Default to old behavior: keep only final ID
790
+ remove_prefix_length = input_ids.shape[1] - 1
791
+
792
+ input_ids = input_ids[:, remove_prefix_length:]
793
+
794
+ # the cache may be in the stardard format (e.g. in contrastive search), convert to bloom's format if needed
795
+ if past_key_values[0][0].shape[0] == input_ids.shape[0]:
796
+ past_key_values = self._convert_to_bloom_cache(past_key_values)
797
+
798
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
799
+ if inputs_embeds is not None and past_key_values is None:
800
+ model_inputs = {"inputs_embeds": inputs_embeds}
801
+ else:
802
+ model_inputs = {"input_ids": input_ids}
803
+
804
+ model_inputs.update(
805
+ {
806
+ "past_key_values": past_key_values,
807
+ "use_cache": kwargs.get("use_cache"),
808
+ "attention_mask": attention_mask,
809
+ }
810
+ )
811
+ return model_inputs
812
+
813
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
814
+ @add_code_sample_docstrings(
815
+ checkpoint=_CHECKPOINT_FOR_DOC,
816
+ output_type=CausalLMOutputWithCrossAttentions,
817
+ config_class=_CONFIG_FOR_DOC,
818
+ )
819
+ def forward(
820
+ self,
821
+ input_ids: Optional[torch.LongTensor] = None,
822
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
823
+ attention_mask: Optional[torch.Tensor] = None,
824
+ head_mask: Optional[torch.Tensor] = None,
825
+ inputs_embeds: Optional[torch.Tensor] = None,
826
+ labels: Optional[torch.Tensor] = None,
827
+ use_cache: Optional[bool] = None,
828
+ output_attentions: Optional[bool] = None,
829
+ output_hidden_states: Optional[bool] = None,
830
+ return_dict: Optional[bool] = None,
831
+ **deprecated_arguments,
832
+ ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
833
+ r"""
834
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
835
+ Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
836
+ `labels = input_ids` Indices are selected in `[-100, 0, ..., config.vocab_size]` All labels set to `-100`
837
+ are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]`
838
+ """
839
+ if deprecated_arguments.pop("position_ids", False) is not False:
840
+ # `position_ids` could have been `torch.Tensor` or `None` so defaulting pop to `False` allows to detect if users were passing explicitly `None`
841
+ warnings.warn(
842
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
843
+ " passing `position_ids`.",
844
+ FutureWarning,
845
+ )
846
+ if len(deprecated_arguments) > 0:
847
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
848
+
849
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
850
+
851
+ transformer_outputs = self.transformer(
852
+ input_ids,
853
+ past_key_values=past_key_values,
854
+ attention_mask=attention_mask,
855
+ head_mask=head_mask,
856
+ inputs_embeds=inputs_embeds,
857
+ use_cache=use_cache,
858
+ output_attentions=output_attentions,
859
+ output_hidden_states=output_hidden_states,
860
+ return_dict=return_dict,
861
+ )
862
+ hidden_states = transformer_outputs[0]
863
+
864
+ lm_logits = self.lm_head(hidden_states)
865
+
866
+ loss = None
867
+ if labels is not None:
868
+ # move labels to correct device to enable model parallelism
869
+ labels = labels.to(lm_logits.device)
870
+ # Shift so that tokens < n predict n
871
+ shift_logits = lm_logits[..., :-1, :].contiguous()
872
+ shift_labels = labels[..., 1:].contiguous()
873
+ batch_size, seq_length, vocab_size = shift_logits.shape
874
+ # Flatten the tokens
875
+ loss_fct = CrossEntropyLoss()
876
+ loss = loss_fct(
877
+ shift_logits.view(batch_size * seq_length, vocab_size), shift_labels.view(batch_size * seq_length)
878
+ )
879
+
880
+ if not return_dict:
881
+ output = (lm_logits,) + transformer_outputs[1:]
882
+ return ((loss,) + output) if loss is not None else output
883
+
884
+ return CausalLMOutputWithCrossAttentions(
885
+ loss=loss,
886
+ logits=lm_logits,
887
+ past_key_values=transformer_outputs.past_key_values,
888
+ hidden_states=transformer_outputs.hidden_states,
889
+ attentions=transformer_outputs.attentions,
890
+ )
891
+
892
+ def _reorder_cache(
893
+ self, past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
894
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
895
+ """
896
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
897
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
898
+ beam_idx at every generation step.
899
+
900
+ Output shares the same memory storage as `past`.
901
+ """
902
+ standardized_past = self._convert_to_standard_cache(past, batch_size=len(beam_idx))
903
+
904
+ # Get a copy of `beam_idx` on all the devices where we need those indices.
905
+ device_to_beam_idx = {
906
+ past_state.device: beam_idx.to(past_state.device) for layer_past in past for past_state in layer_past
907
+ }
908
+ reordered_past = tuple(
909
+ (
910
+ layer_past[0].index_select(0, device_to_beam_idx[layer_past[0].device]),
911
+ layer_past[1].index_select(0, device_to_beam_idx[layer_past[0].device]),
912
+ )
913
+ for layer_past in standardized_past
914
+ )
915
+ return self._convert_to_bloom_cache(reordered_past)
916
+
917
+
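A toy illustration (standalone tensors with assumed batch-first shapes) of what `_reorder_cache` does: each cached key/value is gathered along the batch dimension with `index_select` so the cache follows the surviving beams; the real method additionally converts BLOOM's fused cache layout to and from a standard per-batch layout around this gather.

import torch

batch_size, num_heads, seq_len, head_dim = 3, 2, 4, 8
layer_key = torch.randn(batch_size, num_heads, seq_len, head_dim)
layer_value = torch.randn(batch_size, num_heads, seq_len, head_dim)

# beam_idx[i] = which previous batch entry beam i continues from
beam_idx = torch.tensor([2, 0, 0])

reordered_key = layer_key.index_select(0, beam_idx.to(layer_key.device))
reordered_value = layer_value.index_select(0, beam_idx.to(layer_value.device))

# beam 1 now carries the cache that previously belonged to batch entry 0
assert torch.equal(reordered_key[1], layer_key[0])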
918
+ @add_start_docstrings(
919
+ """
920
+ The Bloom Model transformer with a sequence classification head on top (linear layer).
921
+
922
+ [`BloomForSequenceClassification`] uses the last token in order to do the classification, as other causal models
923
+ (e.g. GPT-1) do.
924
+
925
+ Since it does classification on the last token, it needs to know the position of the last token. If a
926
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
927
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
928
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
929
+ each row of the batch).
930
+ """,
931
+ BLOOM_START_DOCSTRING,
932
+ )
933
+ class BloomForSequenceClassification(BloomPreTrainedModel):
934
+ def __init__(self, config: BloomConfig):
935
+ super().__init__(config)
936
+ self.num_labels = config.num_labels
937
+ self.transformer = BloomModel(config)
938
+ self.score = nn.Linear(config.hidden_size, config.num_labels, bias=False)
939
+
940
+ # Initialize weights and apply final processing
941
+ self.post_init()
942
+
943
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
944
+ @add_code_sample_docstrings(
945
+ checkpoint=_CHECKPOINT_FOR_DOC,
946
+ output_type=SequenceClassifierOutputWithPast,
947
+ config_class=_CONFIG_FOR_DOC,
948
+ )
949
+ def forward(
950
+ self,
951
+ input_ids: Optional[torch.LongTensor] = None,
952
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
953
+ attention_mask: Optional[torch.Tensor] = None,
954
+ head_mask: Optional[torch.Tensor] = None,
955
+ inputs_embeds: Optional[torch.Tensor] = None,
956
+ labels: Optional[torch.Tensor] = None,
957
+ use_cache: Optional[bool] = None,
958
+ output_attentions: Optional[bool] = None,
959
+ output_hidden_states: Optional[bool] = None,
960
+ return_dict: Optional[bool] = None,
961
+ **deprecated_arguments,
962
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutputWithPast]:
963
+ r"""
964
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
965
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
966
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
967
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
968
+ """
969
+ if deprecated_arguments.pop("position_ids", False) is not False:
970
+ # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None`
971
+ warnings.warn(
972
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
973
+ " passing `position_ids`.",
974
+ FutureWarning,
975
+ )
976
+ if len(deprecated_arguments) > 0:
977
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
978
+
979
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
980
+
981
+ transformer_outputs = self.transformer(
982
+ input_ids,
983
+ past_key_values=past_key_values,
984
+ attention_mask=attention_mask,
985
+ head_mask=head_mask,
986
+ inputs_embeds=inputs_embeds,
987
+ use_cache=use_cache,
988
+ output_attentions=output_attentions,
989
+ output_hidden_states=output_hidden_states,
990
+ return_dict=return_dict,
991
+ )
992
+
993
+ hidden_states = transformer_outputs[0]
994
+ logits = self.score(hidden_states)
995
+
996
+ if input_ids is not None:
997
+ batch_size = input_ids.shape[0]
998
+ else:
999
+ batch_size = inputs_embeds.shape[0]
1000
+
1001
+ if self.config.pad_token_id is None and batch_size != 1:
1002
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1003
+ if self.config.pad_token_id is None:
1004
+ sequence_lengths = -1
1005
+ else:
1006
+ if input_ids is not None:
1007
+ # if no pad token found, use modulo instead of reverse indexing for ONNX compatibility
1008
+ sequence_lengths = torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1
1009
+ sequence_lengths = sequence_lengths % input_ids.shape[-1]
1010
+ sequence_lengths = sequence_lengths.to(logits.device)
1011
+ else:
1012
+ sequence_lengths = -1
1013
+ logger.warning(
1014
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1015
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1016
+ )
1017
+
1018
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_lengths]
1019
+
1020
+ loss = None
1021
+ if labels is not None:
1022
+ if self.config.problem_type is None:
1023
+ if self.num_labels == 1:
1024
+ self.config.problem_type = "regression"
1025
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1026
+ self.config.problem_type = "single_label_classification"
1027
+ else:
1028
+ self.config.problem_type = "multi_label_classification"
1029
+
1030
+ if self.config.problem_type == "regression":
1031
+ loss_fct = MSELoss()
1032
+ if self.num_labels == 1:
1033
+ loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
1034
+ else:
1035
+ loss = loss_fct(pooled_logits, labels)
1036
+ elif self.config.problem_type == "single_label_classification":
1037
+ loss_fct = CrossEntropyLoss()
1038
+ loss = loss_fct(pooled_logits, labels)
1039
+ elif self.config.problem_type == "multi_label_classification":
1040
+ loss_fct = BCEWithLogitsLoss()
1041
+ loss = loss_fct(pooled_logits, labels)
1042
+ if not return_dict:
1043
+ output = (pooled_logits,) + transformer_outputs[1:]
1044
+ return ((loss,) + output) if loss is not None else output
1045
+
1046
+ return SequenceClassifierOutputWithPast(
1047
+ loss=loss,
1048
+ logits=pooled_logits,
1049
+ past_key_values=transformer_outputs.past_key_values,
1050
+ hidden_states=transformer_outputs.hidden_states,
1051
+ attentions=transformer_outputs.attentions,
1052
+ )
1053
+
1054
+
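A standalone sketch (toy ids, assumed `pad_token_id` of 0) of the pooling trick used by `BloomForSequenceClassification` above: the index of the first pad token minus one gives the last real token, and the modulo keeps the ONNX-friendly fallback to the final position when a row contains no padding at all.

import torch

pad_token_id = 0
input_ids = torch.tensor([
    [5, 7, 9, 0, 0],  # padded row -> last real token at index 2
    [3, 4, 6, 8, 2],  # unpadded row -> falls back to the last index, 4
])

sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1
sequence_lengths = sequence_lengths % input_ids.shape[-1]
print(sequence_lengths)  # tensor([2, 4])

num_labels = 3
logits = torch.randn(input_ids.shape[0], input_ids.shape[1], num_labels)
pooled_logits = logits[torch.arange(input_ids.shape[0]), sequence_lengths]
print(pooled_logits.shape)  # torch.Size([2, 3])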
1055
+ @add_start_docstrings(
1056
+ """
1057
+ Bloom Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
1058
+ Named-Entity-Recognition (NER) tasks.
1059
+ """,
1060
+ BLOOM_START_DOCSTRING,
1061
+ )
1062
+ class BloomForTokenClassification(BloomPreTrainedModel):
1063
+ def __init__(self, config: BloomConfig):
1064
+ super().__init__(config)
1065
+ self.num_labels = config.num_labels
1066
+
1067
+ self.transformer = BloomModel(config)
1068
+ if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
1069
+ classifier_dropout = config.classifier_dropout
1070
+ elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None:
1071
+ classifier_dropout = config.hidden_dropout
1072
+ else:
1073
+ classifier_dropout = 0.1
1074
+ self.dropout = nn.Dropout(classifier_dropout)
1075
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
1076
+
1077
+ # Initialize weights and apply final processing
1078
+ self.post_init()
1079
+
1080
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING)
1081
+ @add_code_sample_docstrings(
1082
+ checkpoint=_CHECKPOINT_FOR_DOC,
1083
+ output_type=TokenClassifierOutput,
1084
+ config_class=_CONFIG_FOR_DOC,
1085
+ )
1086
+ def forward(
1087
+ self,
1088
+ input_ids: Optional[torch.LongTensor] = None,
1089
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1090
+ attention_mask: Optional[torch.Tensor] = None,
1091
+ head_mask: Optional[torch.Tensor] = None,
1092
+ inputs_embeds: Optional[torch.Tensor] = None,
1093
+ labels: Optional[torch.Tensor] = None,
1094
+ use_cache: Optional[bool] = None,
1095
+ output_attentions: Optional[bool] = None,
1096
+ output_hidden_states: Optional[bool] = None,
1097
+ return_dict: Optional[bool] = None,
1098
+ **deprecated_arguments,
1099
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
1100
+ r"""
1101
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1102
+ Labels for computing the token classification loss. Indices should be selected in `[0, ...,
1103
+ config.num_labels - 1]`. A cross-entropy loss is computed over every token position, with the logits
1104
+ flattened to `(batch_size * sequence_length, num_labels)`.
1105
+ """
1106
+ if deprecated_arguments.pop("position_ids", False) is not False:
1107
+ # `position_ids` could have been `torch.Tensor` or `None`, so defaulting the pop to `False` lets us detect whether users were explicitly passing `None`
1108
+ warnings.warn(
1109
+ "`position_ids` have no functionality in BLOOM and will be removed in v5.0.0. You can safely ignore"
1110
+ " passing `position_ids`.",
1111
+ FutureWarning,
1112
+ )
1113
+ if len(deprecated_arguments) > 0:
1114
+ raise ValueError(f"Got unexpected arguments: {deprecated_arguments}")
1115
+
1116
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1117
+
1118
+ transformer_outputs = self.transformer(
1119
+ input_ids,
1120
+ past_key_values=past_key_values,
1121
+ attention_mask=attention_mask,
1122
+ head_mask=head_mask,
1123
+ inputs_embeds=inputs_embeds,
1124
+ use_cache=use_cache,
1125
+ output_attentions=output_attentions,
1126
+ output_hidden_states=output_hidden_states,
1127
+ return_dict=return_dict,
1128
+ )
1129
+
1130
+ hidden_states = transformer_outputs[0]
1131
+ hidden_states = self.dropout(hidden_states)
1132
+ logits = self.classifier(hidden_states)
1133
+
1134
+ loss = None
1135
+ if labels is not None:
1136
+ # move labels to the correct device to enable model parallelism
1137
+ labels = labels.to(logits.device)
1138
+ batch_size, seq_length = labels.shape
1139
+ loss_fct = CrossEntropyLoss()
1140
+ loss = loss_fct(
1141
+ logits.view(batch_size * seq_length, self.num_labels), labels.view(batch_size * seq_length)
1142
+ )
1143
+
1144
+ if not return_dict:
1145
+ output = (logits,) + transformer_outputs[2:]
1146
+ return ((loss,) + output) if loss is not None else output
1147
+
1148
+ return TokenClassifierOutput(
1149
+ loss=loss,
1150
+ logits=logits,
1151
+ hidden_states=transformer_outputs.hidden_states,
1152
+ attentions=transformer_outputs.attentions,
1153
+ )
1154
+
1155
+
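A hedged usage sketch for the token-classification head above: a tiny, randomly initialized configuration (the sizes below are illustrative, not a released checkpoint) is enough to check that logits come out per token and that a 2-D `labels` tensor yields a loss.

import torch
from transformers import BloomConfig, BloomForTokenClassification

# tiny random model: for shape checking only, not meaningful predictions
config = BloomConfig(vocab_size=128, hidden_size=32, n_layer=2, n_head=4, num_labels=5)
model = BloomForTokenClassification(config)

input_ids = torch.randint(0, config.vocab_size, (2, 10))
labels = torch.randint(0, config.num_labels, (2, 10))

outputs = model(input_ids=input_ids, labels=labels)
print(outputs.logits.shape)  # torch.Size([2, 10, 5])
print(outputs.loss)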
1156
+ @add_start_docstrings(
1157
+ """
1158
+ The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like
1159
+ SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
1160
+ """,
1161
+ BLOOM_START_DOCSTRING,
1162
+ )
1163
+ class BloomForQuestionAnswering(BloomPreTrainedModel):
1164
+ def __init__(self, config):
1165
+ super().__init__(config)
1166
+ self.transformer = BloomModel(config)
1167
+ self.qa_outputs = nn.Linear(config.hidden_size, 2)
1168
+
1169
+ # Initialize weights and apply final processing
1170
+ self.post_init()
1171
+
1172
+ @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1173
+ def forward(
1174
+ self,
1175
+ input_ids: Optional[torch.LongTensor] = None,
1176
+ attention_mask: Optional[torch.FloatTensor] = None,
1177
+ position_ids: Optional[torch.LongTensor] = None,
1178
+ head_mask: Optional[torch.FloatTensor] = None,
1179
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1180
+ start_positions: Optional[torch.LongTensor] = None,
1181
+ end_positions: Optional[torch.LongTensor] = None,
1182
+ output_attentions: Optional[bool] = None,
1183
+ output_hidden_states: Optional[bool] = None,
1184
+ return_dict: Optional[bool] = None,
1185
+ ) -> Union[Tuple, QuestionAnsweringModelOutput]:
1186
+ r"""
1187
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1188
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
1189
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1190
+ are not taken into account for computing the loss.
1191
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1192
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
1193
+ Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
1194
+ are not taken into account for computing the loss.
1195
+ """
1196
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1197
+
1198
+ outputs = self.transformer(
1199
+ input_ids,
1200
+ attention_mask=attention_mask,
1201
+ position_ids=position_ids,
1202
+ head_mask=head_mask,
1203
+ inputs_embeds=inputs_embeds,
1204
+ output_attentions=output_attentions,
1205
+ output_hidden_states=output_hidden_states,
1206
+ return_dict=return_dict,
1207
+ )
1208
+
1209
+ sequence_output = outputs[0]
1210
+
1211
+ logits = self.qa_outputs(sequence_output)
1212
+ start_logits, end_logits = logits.split(1, dim=-1)
1213
+ start_logits = start_logits.squeeze(-1).contiguous()
1214
+ end_logits = end_logits.squeeze(-1).contiguous()
1215
+
1216
+ total_loss = None
1217
+ if start_positions is not None and end_positions is not None:
1218
+ # If we are on multi-GPU, splitting adds an extra dimension; squeeze it away
1219
+ if len(start_positions.size()) > 1:
1220
+ start_positions = start_positions.squeeze(-1)
1221
+ if len(end_positions.size()) > 1:
1222
+ end_positions = end_positions.squeeze(-1)
1223
+ # sometimes the start/end positions fall outside our model inputs; we ignore these terms
1224
+ ignored_index = start_logits.size(1)
1225
+ start_positions = start_positions.clamp(0, ignored_index)
1226
+ end_positions = end_positions.clamp(0, ignored_index)
1227
+
1228
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
1229
+ start_loss = loss_fct(start_logits, start_positions)
1230
+ end_loss = loss_fct(end_logits, end_positions)
1231
+ total_loss = (start_loss + end_loss) / 2
1232
+
1233
+ if not return_dict:
1234
+ output = (start_logits, end_logits) + outputs[2:]
1235
+ return ((total_loss,) + output) if total_loss is not None else output
1236
+
1237
+ return QuestionAnsweringModelOutput(
1238
+ loss=total_loss,
1239
+ start_logits=start_logits,
1240
+ end_logits=end_logits,
1241
+ hidden_states=outputs.hidden_states,
1242
+ attentions=outputs.attentions,
1243
+ )
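A toy, self-contained sketch of the span loss computed above: start/end positions are clamped into range, and the clamp bound doubles as `ignore_index`, so answers that fall outside the model inputs contribute nothing to the loss.

import torch
from torch.nn import CrossEntropyLoss

batch_size, seq_len = 2, 8
start_logits = torch.randn(batch_size, seq_len)
end_logits = torch.randn(batch_size, seq_len)

start_positions = torch.tensor([3, 50])  # 50 lies outside the sequence
end_positions = torch.tensor([5, 60])

ignored_index = start_logits.size(1)  # == seq_len
start_positions = start_positions.clamp(0, ignored_index)
end_positions = end_positions.clamp(0, ignored_index)

loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
total_loss = (loss_fct(start_logits, start_positions) + loss_fct(end_logits, end_positions)) / 2
print(total_loss)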
venv/lib/python3.10/site-packages/transformers/models/codegen/__init__.py ADDED
@@ -0,0 +1,73 @@
1
+ # Copyright 2022 Salesforce authors, The EleutherAI, and HuggingFace Teams. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_codegen": ["CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP", "CodeGenConfig", "CodeGenOnnxConfig"],
21
+ "tokenization_codegen": ["CodeGenTokenizer"],
22
+ }
23
+
24
+ try:
25
+ if not is_tokenizers_available():
26
+ raise OptionalDependencyNotAvailable()
27
+ except OptionalDependencyNotAvailable:
28
+ pass
29
+ else:
30
+ _import_structure["tokenization_codegen_fast"] = ["CodeGenTokenizerFast"]
31
+
32
+ try:
33
+ if not is_torch_available():
34
+ raise OptionalDependencyNotAvailable()
35
+ except OptionalDependencyNotAvailable:
36
+ pass
37
+ else:
38
+ _import_structure["modeling_codegen"] = [
39
+ "CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST",
40
+ "CodeGenForCausalLM",
41
+ "CodeGenModel",
42
+ "CodeGenPreTrainedModel",
43
+ ]
44
+
45
+ if TYPE_CHECKING:
46
+ from .configuration_codegen import CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP, CodeGenConfig, CodeGenOnnxConfig
47
+ from .tokenization_codegen import CodeGenTokenizer
48
+
49
+ try:
50
+ if not is_tokenizers_available():
51
+ raise OptionalDependencyNotAvailable()
52
+ except OptionalDependencyNotAvailable:
53
+ pass
54
+ else:
55
+ from .tokenization_codegen_fast import CodeGenTokenizerFast
56
+
57
+ try:
58
+ if not is_torch_available():
59
+ raise OptionalDependencyNotAvailable()
60
+ except OptionalDependencyNotAvailable:
61
+ pass
62
+ else:
63
+ from .modeling_codegen import (
64
+ CODEGEN_PRETRAINED_MODEL_ARCHIVE_LIST,
65
+ CodeGenForCausalLM,
66
+ CodeGenModel,
67
+ CodeGenPreTrainedModel,
68
+ )
69
+
70
+ else:
71
+ import sys
72
+
73
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
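The `__init__.py` above routes the CodeGen classes through `_LazyModule`, so heavy submodules (for example the torch-dependent modeling code) are only imported when one of their attributes is first accessed. A simplified stand-alone analogue of that pattern (stdlib only; not the actual `_LazyModule` implementation) could look like this:

import importlib
import sys
import types


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their attributes is requested."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> fully qualified module that actually defines it
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(self._attr_to_module[attr])
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so the import happens only once
        return value


# demo: expose json.dumps lazily under a throwaway module name
lazy = LazyModule("lazy_demo", {"json": ["dumps"]})
sys.modules["lazy_demo"] = lazy
print(lazy.dumps({"ok": True}))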