diff --git a/ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg_sq.pt b/ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg_sq.pt new file mode 100644 index 0000000000000000000000000000000000000000..97371348919d2139603fe002ad9e5f3dfdcd3232 --- /dev/null +++ b/ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/exp_avg_sq.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08e3ab68253e99ba15290a82f87aa24013966f5d984803b9735b04cecf70a139 +size 50332843 diff --git a/ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg.pt b/ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg.pt new file mode 100644 index 0000000000000000000000000000000000000000..8df237ef84d74873e62246f6f669aca50a936756 --- /dev/null +++ b/ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/exp_avg.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09eaa9225d4401ad0dd15f32d30d2931a2799a9d16ceb636430828c447f8e17e +size 50332828 diff --git a/lm-evaluation-harness/tests/testdata/anagrams2-v0-res.json b/lm-evaluation-harness/tests/testdata/anagrams2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..f74887fe16ec042fcdf995b7b7b694d3fec92659 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/anagrams2-v0-res.json @@ -0,0 +1 @@ +{"results": {"anagrams2": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"anagrams2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/anli_r2-v0-res.json b/lm-evaluation-harness/tests/testdata/anli_r2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..6dc08ebbaa852afef27dbd6002575ada16870eb0 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/anli_r2-v0-res.json @@ -0,0 +1 @@ +{"results": {"anli_r2": {"acc": 0.356, "acc_stderr": 0.015149042659306628}}, "versions": {"anli_r2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_3ds-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_3ds-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..d76cc9bdf55935bc1bc4e71d35267cb58ec618ef --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_3ds-v0-res.json @@ -0,0 +1 @@ +{"results": {"arithmetic_3ds": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_3ds": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-res.json b/lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..fb9a5671e8a4269fce5c477cbc3c795801e75fe1 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/arithmetic_5da-v0-res.json @@ -0,0 +1 @@ +{"results": {"arithmetic_5da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_5da": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_animate_subject_passive-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_animate_subject_passive-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..47cd3d3be14eedc3d525b408e76abe69c45f8586 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_animate_subject_passive-v0-loglikelihood @@ -0,0 +1 @@ +064c38fcd072b8bd12f54ea4f8e41599ed4e11dc386e93b77e1fc07967d1f960 \ No newline at end of file diff --git 
a/lm-evaluation-harness/tests/testdata/blimp_animate_subject_passive-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_animate_subject_passive-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..96a7ed5e2a8715027b1bf853cc1836b4f587a2e5 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_animate_subject_passive-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_animate_subject_passive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_animate_subject_passive": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..2750fcda2aa5ee2efc6f20faa8932853f0f42ba2 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_coordinate_structure_constraint_complex_left_branch": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_coordinate_structure_constraint_complex_left_branch": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_inchoative-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_inchoative-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..b494980087dc4ac33621cca2fe716f1fee83fbd1 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_inchoative-v0-loglikelihood @@ -0,0 +1 @@ +3ff73629fb4473986a0e8ae2fcb7c40e88292189ab0d8755d20836c5aa5a2f99 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..b16238545d5e94fa8c1c8e3166bf0d00863dbf89 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-loglikelihood @@ -0,0 +1 @@ +6469ae3b0d46b008846b5fd132f2d2b26ea2858745d056df1470b89aa97a790f \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_npi_present_1-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_npi_present_1-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..8e4ae8d6efba191c09ebc369b93437a441f188cb --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_npi_present_1-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_npi_present_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_npi_present_1": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..f1846d3e936ffc75f39f0776024014444a2879bb --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_only_npi_scope-v0-loglikelihood @@ -0,0 +1 @@ +fc0be817478c212327050fa297ef61ad214f4847dbff61d4e0fe7914c06a1691 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..ec8108c88d9554aefbeb34e6e0432e490253d26c --- /dev/null +++ 
b/lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_principle_A_case_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_case_2": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_transitive-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_transitive-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..d2c99ab803288212934142c2507a8c316695a34b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_transitive-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_transitive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_transitive": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-res.json b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..dfd3f66b77cb52234d967a827a3c6dffc706e5aa --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap-v0-res.json @@ -0,0 +1 @@ +{"results": {"blimp_wh_vs_that_no_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_no_gap": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/cb-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/cb-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..6fa6f6dae6c806be8a5cad8416df6766f22ae475 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/cb-v0-loglikelihood @@ -0,0 +1 @@ +ec3b1bbb9561e39c43c6f77a23b4060b15c606141c5346e3d0791b3e92aaa5d0 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..5fd526ccc1c07111d2cceef633ccb72b0d65387b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-res.json @@ -0,0 +1 @@ +{"results": {"crows_pairs_english_nationality": {"likelihood_difference": 0.3383027778174895, "likelihood_difference_stderr": 0.015957585374543233, "pct_stereotype": 0.4675925925925926, "pct_stereotype_stderr": 0.03402801581358966}}, "versions": {"crows_pairs_english_nationality": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..32065ff76227a91aa5631f95b66ff1ce19490800 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-loglikelihood @@ -0,0 +1 @@ +c309eabfd247a702e32efc4e08211f9a72693d38995be5dd444d497b476396bd \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json b/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..89bd7338ada6ff7ef485492c5656342881b70600 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json @@ -0,0 +1 @@ +{"results": {"crows_pairs_english_socioeconomic": {"likelihood_difference": 0.3424577735757881, "likelihood_difference_stderr": 0.017459994170011896, "pct_stereotype": 
0.46842105263157896, "pct_stereotype_stderr": 0.036297038088316094}}, "versions": {"crows_pairs_english_socioeconomic": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_nationality-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_nationality-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..e6e282414b02b032bd5b879775686c24e731fd9d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_nationality-v0-loglikelihood @@ -0,0 +1 @@ +146eb60c8796fe3f25307a6776337f0b077b58ce02edec64c99df4b906c19b9f \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/crows_pairs_french_religion-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/crows_pairs_french_religion-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..b31daf0e281664ab74ae88a9edd6bb1029f28d57 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/crows_pairs_french_religion-v0-loglikelihood @@ -0,0 +1 @@ +8af6445eeb634dad5f0723e40615afe993e1e3f129a4f314fe4117e633c2efd3 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/cycle_letters-v0-greedy_until b/lm-evaluation-harness/tests/testdata/cycle_letters-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..9068a24ef5af549a13fe5b4362c2b5afc741bd29 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/cycle_letters-v0-greedy_until @@ -0,0 +1 @@ +eb23f7d5de7528eefd8ed5f8054c402ff947319cccfef7195995946f99389201 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/cycle_letters-v0-res.json b/lm-evaluation-harness/tests/testdata/cycle_letters-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..5b05a9430e90ec2ce0ddcb49a243be9479d3fad1 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/cycle_letters-v0-res.json @@ -0,0 +1 @@ +{"results": {"cycle_letters": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"cycle_letters": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/ethics_cm-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/ethics_cm-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..69289144e0e3ceb0051596d5768b70667f7d19a8 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/ethics_cm-v0-loglikelihood @@ -0,0 +1 @@ +92d136ebb2bd86cd036e61699ad9a1417dbb48651f0a3afa5045cf57cef5a3f6 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/gsm8k-v0-greedy_until b/lm-evaluation-harness/tests/testdata/gsm8k-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..d49400007f95ecd048628bb2f1cadf92132bef24 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/gsm8k-v0-greedy_until @@ -0,0 +1 @@ +e7292dbdd7fd8419ba954f2e0701e04c8d0e8842fe053dbf2fe47d926630e35e \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/headqa-v0-res.json b/lm-evaluation-harness/tests/testdata/headqa-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..adc093cf62c2f807a0f413d0ecc200879931a5b7 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/headqa-v0-res.json @@ -0,0 +1 @@ +{"results": {"headqa": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa": 0}} \ No newline at end of file diff --git 
a/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..67bc2e7be6de4ba9d6b9aa40c0d45cd60d7d506b --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-anatomy-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-anatomy": {"acc": 0.2222222222222222, "acc_norm": 0.23703703703703705, "acc_norm_stderr": 0.03673731683969506, "acc_stderr": 0.0359144408419697}}, "versions": {"hendrycksTest-anatomy": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..596bb28a93f52c857b6a39d416114c12c7ea9147 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-clinical_knowledge-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-clinical_knowledge": {"acc": 0.23773584905660378, "acc_norm": 0.27169811320754716, "acc_norm_stderr": 0.027377706624670713, "acc_stderr": 0.02619980880756191}}, "versions": {"hendrycksTest-clinical_knowledge": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_chemistry-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_chemistry-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..4dc95a151ac2da73b3c5eb23e3fe24a7ccc8024d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_chemistry-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-college_chemistry": {"acc": 0.28, "acc_norm": 0.26, "acc_norm_stderr": 0.04408440022768078, "acc_stderr": 0.04512608598542127}}, "versions": {"hendrycksTest-college_chemistry": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_computer_science-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_computer_science-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..aea595c09f5baf6d21867c47fd5e42152244f555 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_computer_science-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-college_computer_science": {"acc": 0.22, "acc_norm": 0.24, "acc_norm_stderr": 0.04292346959909282, "acc_stderr": 0.041633319989322695}}, "versions": {"hendrycksTest-college_computer_science": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-college_physics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_physics-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..7c2e2f4bf73266d532c7514c98defcba0133f231 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-college_physics-v0-loglikelihood @@ -0,0 +1 @@ +704a7671ef981fb95594782bc446dd632e87ebdbe89436a0603b714fb5786c75 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..05c4db0e2290998cb650c11373f0947c3be8f297 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-loglikelihood @@ -0,0 +1 @@ 
+622f191ccfc7a597d99f39897ebe3f95a9ddce0e662fcfb411aa554b289bb355 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..84cd983ee9d33f831ee397ffd8b11990b70a4b60 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-elementary_mathematics": {"acc": 0.2724867724867725, "acc_norm": 0.2830687830687831, "acc_norm_stderr": 0.023201392938194978, "acc_stderr": 0.022930973071633345}}, "versions": {"hendrycksTest-elementary_mathematics": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..bbc2dacf5f5ac0b14327f0637b4b1aabea7a6167 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-high_school_computer_science": {"acc": 0.2, "acc_norm": 0.22, "acc_norm_stderr": 0.04163331998932269, "acc_stderr": 0.04020151261036845}}, "versions": {"hendrycksTest-high_school_computer_science": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..12ea726b4ba81f264017a7fd71d18a6ac318b0ab --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-loglikelihood @@ -0,0 +1 @@ +11f40d8f48ba5cd739e21d54c3c04d3761f81df5cb7ddd77df868d24ced44b49 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-loglikelihood b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..8a915ef7fc0ab9a7c290867450265a7cadd40494 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-loglikelihood @@ -0,0 +1 @@ +33d1d6eaaa2c3a944bf49d3f220a4efc328d7c3b3465b7cec40ae36d8984b75f \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..4c6a21d7dac4cd7b6fa217e8bebf34d959554a7a --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_statistics-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-high_school_statistics": {"acc": 0.2962962962962963, "acc_norm": 0.3055555555555556, "acc_norm_stderr": 0.03141554629402544, "acc_stderr": 0.03114144782353604}}, "versions": {"hendrycksTest-high_school_statistics": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-nutrition-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-nutrition-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..e2838f880581f7cf743d83ba99a26827c18a09de --- /dev/null 
+++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-nutrition-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-nutrition": {"acc": 0.24509803921568626, "acc_norm": 0.28104575163398693, "acc_norm_stderr": 0.025738854797818723, "acc_stderr": 0.02463004897982476}}, "versions": {"hendrycksTest-nutrition": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/hendrycksTest-professional_law-v0-res.json b/lm-evaluation-harness/tests/testdata/hendrycksTest-professional_law-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..f15a9b34ff26e1382d04b4d6e41fdae6085b30c8 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/hendrycksTest-professional_law-v0-res.json @@ -0,0 +1 @@ +{"results": {"hendrycksTest-professional_law": {"acc": 0.2561929595827901, "acc_norm": 0.2470664928292047, "acc_norm_stderr": 0.011015752255279352, "acc_stderr": 0.011149173153110582}}, "versions": {"hendrycksTest-professional_law": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_num_theory-v0-res.json b/lm-evaluation-harness/tests/testdata/math_num_theory-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..a27a38fa9d4f3a924828bdb4526953a35328c7e5 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_num_theory-v0-res.json @@ -0,0 +1 @@ +{"results": {"math_num_theory": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_num_theory": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_prealgebra-v0-greedy_until b/lm-evaluation-harness/tests/testdata/math_prealgebra-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..5200f4cfa9ed3a735661e987791bf1434555db6e --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_prealgebra-v0-greedy_until @@ -0,0 +1 @@ +752cdf343d7152e476b0273065024f6ea0e0f47ea385c6bdf9067736cb39724a \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/math_precalc-v1-greedy_until b/lm-evaluation-harness/tests/testdata/math_precalc-v1-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..71bbd8d9c221ca484d517bda46c109b2610f79f6 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/math_precalc-v1-greedy_until @@ -0,0 +1 @@ +bc834b06fd79473ca6fe38a51b714aad0bf0478c1b0eec787eca34dbdf69cb71 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/mutual-v1-loglikelihood b/lm-evaluation-harness/tests/testdata/mutual-v1-loglikelihood new file mode 100644 index 0000000000000000000000000000000000000000..0022f466d25f3e3a639720e4600732c9c0c1141d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/mutual-v1-loglikelihood @@ -0,0 +1 @@ +f759213a28f0412510bf1a24c9cab0dae64bdee902d42a26225295445e7779db \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..b37a91cc2dea829e8dab7bb0fe934442c54b3a26 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_bookcorpus2-v0-loglikelihood_rolling @@ -0,0 +1 @@ +5c17ddfebeab8c41dabadb6fc216ceda91e3fe5dc95aaf1b2c843d7f11828b03 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v0-res.json new file mode 100644 index 
0000000000000000000000000000000000000000..860aa06c974e58d03f54ab1d9cb14c7e98019d4e --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_dm-mathematics": {"bits_per_byte": 6.176600873627999e-05, "byte_perplexity": 1.0000617679162955, "word_perplexity": 1.0002875035042451}}, "versions": {"pile_dm-mathematics": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-res.json b/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..192e9066a42acf28436ae325a212b2a7c2ebf517 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_dm-mathematics-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_dm-mathematics": {"bits_per_byte": 8.910951449933553e-05, "byte_perplexity": 1.0000617679162955, "word_perplexity": 1.0002875035042451}}, "versions": {"pile_dm-mathematics": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_enron-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_enron-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..57dbe764605ef5e1e4578682549a001c851704c0 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_enron-v0-loglikelihood_rolling @@ -0,0 +1 @@ +4baa6ccdc9e3aa9921675ab4400d5e89d7b546b844a8ea28f6461d649066418a \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_github-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_github-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..cf8251e4f68e2e893624142031e80d4d5777f4f2 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_github-v0-loglikelihood_rolling @@ -0,0 +1 @@ +df384c3df3d8f53273e97127c5bb84c17e638acad7d6bc9c91f6dee96d43b639 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_hackernews-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_hackernews-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..48b767bfe706bb035e4553ea9c4119347303bab9 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_hackernews-v0-loglikelihood_rolling @@ -0,0 +1 @@ +ec1082ee5a5326e0d57aa4e73b634937140c1de9af95f154e8ab57b05d9b422b \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_philpapers-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_philpapers-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..4fbbc241ba9487c2513cdf46dbb76e004e401418 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_philpapers-v0-loglikelihood_rolling @@ -0,0 +1 @@ +339ba5d8c044c4a3ff9b9a8eaa24da1d6c01b72972074eb671a7da049eeb7047 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_uspto-v0-res.json b/lm-evaluation-harness/tests/testdata/pile_uspto-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..c13dfc73f5927415055cf393fb16bd13ba6b1b56 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_uspto-v0-res.json @@ -0,0 +1 @@ +{"results": {"pile_uspto": {"bits_per_byte": 0.00012062434384130924, "byte_perplexity": 1.00012063161925, "word_perplexity": 1.0007716198916954}}, "versions": {"pile_uspto": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_wikipedia-v1-res.json 
b/lm-evaluation-harness/tests/testdata/pile_wikipedia-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..4f2314e66b3a5dbd9ed3c25d9e9a97c7d1fbff3d --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_wikipedia-v1-res.json @@ -0,0 +1 @@ +{"results": {"pile_wikipedia": {"bits_per_byte": 0.00024287370359008176, "byte_perplexity": 1.0001683613940646, "word_perplexity": 1.001084677949439}}, "versions": {"pile_wikipedia": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..81c2e5ed06321b250a08a4232b3720ea5b650156 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-loglikelihood_rolling @@ -0,0 +1 @@ +68263c52adc0086011e2220b619983935cabb1cc1f5f9f8ee1a74ab2a7457967 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/pubmedqa-v0-res.json b/lm-evaluation-harness/tests/testdata/pubmedqa-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..bb39463a4ab7244109901cbbc06ded3192ee0480 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/pubmedqa-v0-res.json @@ -0,0 +1 @@ +{"results": {"pubmedqa": {"acc": 0.324, "acc_stderr": 0.01480686473373886}}, "versions": {"pubmedqa": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/reversed_words-v0-greedy_until b/lm-evaluation-harness/tests/testdata/reversed_words-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..3f28488a9028fed32a088de9a2e8c0fac4fd12de --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/reversed_words-v0-greedy_until @@ -0,0 +1 @@ +1d79fc4f0177f9624a487b9973f4e0e1d3f8404993b419a7b807a690ebbbb290 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/reversed_words-v0-res.json b/lm-evaluation-harness/tests/testdata/reversed_words-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..9285ff2694c140b120aca438098daa39fc282a87 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/reversed_words-v0-res.json @@ -0,0 +1 @@ +{"results": {"reversed_words": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"reversed_words": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-greedy_until b/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-greedy_until new file mode 100644 index 0000000000000000000000000000000000000000..52156c85072e4f1a829345a4b9eef7af2c2ca059 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v0-greedy_until @@ -0,0 +1 @@ +0d7c56e1aa71ffd8f94bde28f6e8dfdd35f7aaadffa0620bd2a27704253d6c14 \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v1-res.json b/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v1-res.json new file mode 100644 index 0000000000000000000000000000000000000000..30aa72f2bafd0788837ca50fa9d5c75f954daef0 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/truthfulqa_gen-v1-res.json @@ -0,0 +1 @@ +{"results": {"truthfulqa_gen": {"bleu_acc": 0.0, "bleu_acc_stderr": 0.0, "bleu_diff": 0.0, "bleu_diff_stderr": 0.0, "bleu_max": 0.0, "bleu_max_stderr": 0.0, "bleurt_acc": 0.835985312117503, "bleurt_acc_stderr": 0.012962704327492454, "bleurt_diff": 0.14077322143090107, "bleurt_diff_stderr": 0.005459888909582694, "bleurt_max": 
-1.4399358725752065, "bleurt_max_stderr": 0.0022126992369197133, "rouge1_acc": 0.0, "rouge1_acc_stderr": 0.0, "rouge1_diff": 0.0, "rouge1_diff_stderr": 0.0, "rouge1_max": 0.0, "rouge1_max_stderr": 0.0, "rouge2_acc": 0.0, "rouge2_acc_stderr": 0.0, "rouge2_diff": 0.0, "rouge2_diff_stderr": 0.0, "rouge2_max": 0.0, "rouge2_max_stderr": 0.0, "rougeL_acc": 0.0, "rougeL_acc_stderr": 0.0, "rougeL_diff": 0.0, "rougeL_diff_stderr": 0.0, "rougeL_max": 0.0, "rougeL_max_stderr": 0.0}}, "versions": {"truthfulqa_gen": 1}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wikitext-v0-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/wikitext-v0-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..f09af45a38c0de097358c587420858c7a53a10aa --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wikitext-v0-loglikelihood_rolling @@ -0,0 +1 @@ +b6f83e6cf7535ee41b0057c3e2ec2cf7f2fa5a9119b305c479a83091d1142b2c \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wikitext-v1-loglikelihood_rolling b/lm-evaluation-harness/tests/testdata/wikitext-v1-loglikelihood_rolling new file mode 100644 index 0000000000000000000000000000000000000000..f09af45a38c0de097358c587420858c7a53a10aa --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wikitext-v1-loglikelihood_rolling @@ -0,0 +1 @@ +b6f83e6cf7535ee41b0057c3e2ec2cf7f2fa5a9119b305c479a83091d1142b2c \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..79a0d12fe6f5750749e56dc3919283f71d021fa0 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-de-fr-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-de-fr": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.011897164096796364, "chrf_stderr": 0.00010158164726118333, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-de-fr": 0}} \ No newline at end of file diff --git a/lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-res.json b/lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-res.json new file mode 100644 index 0000000000000000000000000000000000000000..b04f968d76055c494762ec6cd2a84327c8351742 --- /dev/null +++ b/lm-evaluation-harness/tests/testdata/wmt20-en-ta-v0-res.json @@ -0,0 +1 @@ +{"results": {"wmt20-en-ta": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0, "chrf_stderr": 0.0, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-ta": 0}} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 new file mode 100644 index 0000000000000000000000000000000000000000..9351606de26547246c807a6f74ffa81c84448456 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/algorithms/flow/tests/netgen-2.gpickle.bz2 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b17e66cdeda8edb8d1dec72626c77f1f65dd4675e3f76dc2fc4fd84aa038e30 +size 18972 diff --git a/venv/lib/python3.10/site-packages/networkx/generators/atlas.dat.gz b/venv/lib/python3.10/site-packages/networkx/generators/atlas.dat.gz new file mode 100644 index 0000000000000000000000000000000000000000..2cb5d1308c1168df6218cee0a97552c5787a29b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/networkx/generators/atlas.dat.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:73fc416df0164923607751cb759f4ae81deb5f6550bf25be59c86de3b747e41d +size 8887 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ac3cea6476f0799521059c755ca806751ded8ab --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.all-named-index.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60f1945edc3e4ec38f6e234389e647a1b369de8afb9c7840c491a39880c0caa1 +size 3948 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.column-metadata-handling.parquet b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.column-metadata-handling.parquet new file mode 100644 index 0000000000000000000000000000000000000000..9ff4f6d37d08035af5855b426e905cdb9d95e12e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.column-metadata-handling.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eec79b660a5f75c3f7ed092c4f71610cbeaf380f6047a1816aae3834256d398c +size 2012 diff --git a/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.parquet b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.parquet new file mode 100644 index 0000000000000000000000000000000000000000..2b5ab612e8045f7cc6e4b7052f3a1af84a9400a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pyarrow/tests/data/parquet/v0.7.1.parquet @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:be6773848ce905b99192adc68f0c3b2aabab7d214db50b92a52203790566ab2b +size 4372 diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__init__.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f1b19a949abbef25ed52f7e0d0d1efd6c2410d12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/convbert/__init__.py @@ -0,0 +1,130 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from typing import TYPE_CHECKING + +from ...utils import ( + OptionalDependencyNotAvailable, + _LazyModule, + is_tf_available, + is_tokenizers_available, + is_torch_available, +) + + +_import_structure = { + "configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"], + "tokenization_convbert": ["ConvBertTokenizer"], +} + +try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["tokenization_convbert_fast"] = ["ConvBertTokenizerFast"] + +try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_convbert"] = [ + "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "ConvBertForMaskedLM", + "ConvBertForMultipleChoice", + "ConvBertForQuestionAnswering", + "ConvBertForSequenceClassification", + "ConvBertForTokenClassification", + "ConvBertLayer", + "ConvBertModel", + "ConvBertPreTrainedModel", + "load_tf_weights_in_convbert", + ] + + +try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() +except OptionalDependencyNotAvailable: + pass +else: + _import_structure["modeling_tf_convbert"] = [ + "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", + "TFConvBertForMaskedLM", + "TFConvBertForMultipleChoice", + "TFConvBertForQuestionAnswering", + "TFConvBertForSequenceClassification", + "TFConvBertForTokenClassification", + "TFConvBertLayer", + "TFConvBertModel", + "TFConvBertPreTrainedModel", + ] + + +if TYPE_CHECKING: + from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig + from .tokenization_convbert import ConvBertTokenizer + + try: + if not is_tokenizers_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .tokenization_convbert_fast import ConvBertTokenizerFast + + try: + if not is_torch_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_convbert import ( + CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + ConvBertForMaskedLM, + ConvBertForMultipleChoice, + ConvBertForQuestionAnswering, + ConvBertForSequenceClassification, + ConvBertForTokenClassification, + ConvBertLayer, + ConvBertModel, + ConvBertPreTrainedModel, + load_tf_weights_in_convbert, + ) + + try: + if not is_tf_available(): + raise OptionalDependencyNotAvailable() + except OptionalDependencyNotAvailable: + pass + else: + from .modeling_tf_convbert import ( + TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, + TFConvBertForMaskedLM, + TFConvBertForMultipleChoice, + TFConvBertForQuestionAnswering, + TFConvBertForSequenceClassification, + TFConvBertForTokenClassification, + TFConvBertLayer, + TFConvBertModel, + TFConvBertPreTrainedModel, + ) + + +else: + import sys + + sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__) diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8d5af776d369960480a1d8f219d39e010f98f2e Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/convbert/__pycache__/tokenization_convbert_fast.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py new file mode 100644 index 0000000000000000000000000000000000000000..d309ca396baffcfa707ecd599f7b4d280dc348a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/convbert/configuration_convbert.py @@ -0,0 +1,160 @@ +# coding=utf-8 +# Copyright The HuggingFace team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" ConvBERT model configuration""" + +from collections import OrderedDict +from typing import Mapping + +from ...configuration_utils import PretrainedConfig +from ...onnx import OnnxConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class ConvBertConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ConvBertModel`]. It is used to instantiate an + ConvBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration + with the defaults will yield a similar configuration to that of the ConvBERT + [YituTech/conv-bert-base](https://huggingface.co/YituTech/conv-bert-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + vocab_size (`int`, *optional*, defaults to 30522): + Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by + the `inputs_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`]. + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): + The dropout ratio for the attention probabilities. + max_position_embeddings (`int`, *optional*, defaults to 512): + The maximum sequence length that this model might ever be used with. Typically set this to something large + just in case (e.g., 512 or 1024 or 2048). 
+ type_vocab_size (`int`, *optional*, defaults to 2): + The vocabulary size of the `token_type_ids` passed when calling [`ConvBertModel`] or [`TFConvBertModel`]. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices. + layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + head_ratio (`int`, *optional*, defaults to 2): + Ratio gamma to reduce the number of attention heads. + num_groups (`int`, *optional*, defaults to 1): + The number of groups for grouped linear layers for ConvBert model + conv_kernel_size (`int`, *optional*, defaults to 9): + The size of the convolutional kernel. + classifier_dropout (`float`, *optional*): + The dropout ratio for the classification head. + + Example: + + ```python + >>> from transformers import ConvBertConfig, ConvBertModel + + >>> # Initializing a ConvBERT convbert-base-uncased style configuration + >>> configuration = ConvBertConfig() + + >>> # Initializing a model (with random weights) from the convbert-base-uncased style configuration + >>> model = ConvBertModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "convbert" + + def __init__( + self, + vocab_size=30522, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.1, + attention_probs_dropout_prob=0.1, + max_position_embeddings=512, + type_vocab_size=2, + initializer_range=0.02, + layer_norm_eps=1e-12, + pad_token_id=1, + bos_token_id=0, + eos_token_id=2, + embedding_size=768, + head_ratio=2, + conv_kernel_size=9, + num_groups=1, + classifier_dropout=None, + **kwargs, + ): + super().__init__( + pad_token_id=pad_token_id, + bos_token_id=bos_token_id, + eos_token_id=eos_token_id, + **kwargs, + ) + + self.vocab_size = vocab_size + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.max_position_embeddings = max_position_embeddings + self.type_vocab_size = type_vocab_size + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.embedding_size = embedding_size + self.head_ratio = head_ratio + self.conv_kernel_size = conv_kernel_size + self.num_groups = num_groups + self.classifier_dropout = classifier_dropout + + +# Copied from transformers.models.bert.configuration_bert.BertOnnxConfig +class ConvBertOnnxConfig(OnnxConfig): + @property + def inputs(self) -> Mapping[str, Mapping[int, str]]: + if self.task == "multiple-choice": + dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} + else: + dynamic_axis = {0: "batch", 1: "sequence"} + return OrderedDict( + [ + ("input_ids", dynamic_axis), + ("attention_mask", dynamic_axis), + ("token_type_ids", dynamic_axis), + ] + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py new file mode 100644 index 0000000000000000000000000000000000000000..3d4ff779874b30b0c094c596cedaca597e03ed36 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# Copyright 2020 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Convert ConvBERT checkpoint.""" + +import argparse + +from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert +from transformers.utils import logging + + +logging.set_verbosity_info() + + +def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path): + conf = ConvBertConfig.from_json_file(convbert_config_file) + model = ConvBertModel(conf) + + model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path) + model.save_pretrained(pytorch_dump_path) + + tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True) + tf_model.save_pretrained(pytorch_dump_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # Required parameters + parser.add_argument( + "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." + ) + parser.add_argument( + "--convbert_config_file", + default=None, + type=str, + required=True, + help=( + "The config json file corresponding to the pre-trained ConvBERT model. \n" + "This specifies the model architecture." + ), + ) + parser.add_argument( + "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." + ) + args = parser.parse_args() + convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path) diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py new file mode 100644 index 0000000000000000000000000000000000000000..d88add4e1390ef790c670c2407f280a8b4ab743a --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/convbert/modeling_convbert.py @@ -0,0 +1,1337 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" PyTorch ConvBERT model.""" + + +import math +import os +from operator import attrgetter +from typing import Optional, Tuple, Union + +import torch +import torch.utils.checkpoint +from torch import nn +from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss + +from ...activations import ACT2FN, get_activation +from ...modeling_outputs import ( + BaseModelOutputWithCrossAttentions, + MaskedLMOutput, + MultipleChoiceModelOutput, + QuestionAnsweringModelOutput, + SequenceClassifierOutput, + TokenClassifierOutput, +) +from ...modeling_utils import PreTrainedModel, SequenceSummary +from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer +from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging +from .configuration_convbert import ConvBertConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base" +_CONFIG_FOR_DOC = "ConvBertConfig" + + +from ..deprecated._archive_maps import CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +def load_tf_weights_in_convbert(model, config, tf_checkpoint_path): + """Load tf checkpoints in a pytorch model.""" + try: + import tensorflow as tf + except ImportError: + logger.error( + "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " + "https://www.tensorflow.org/install/ for installation instructions." + ) + raise + tf_path = os.path.abspath(tf_checkpoint_path) + logger.info(f"Converting TensorFlow checkpoint from {tf_path}") + # Load weights from TF model + init_vars = tf.train.list_variables(tf_path) + tf_data = {} + for name, shape in init_vars: + logger.info(f"Loading TF weight {name} with shape {shape}") + array = tf.train.load_variable(tf_path, name) + tf_data[name] = array + + param_mapping = { + "embeddings.word_embeddings.weight": "electra/embeddings/word_embeddings", + "embeddings.position_embeddings.weight": "electra/embeddings/position_embeddings", + "embeddings.token_type_embeddings.weight": "electra/embeddings/token_type_embeddings", + "embeddings.LayerNorm.weight": "electra/embeddings/LayerNorm/gamma", + "embeddings.LayerNorm.bias": "electra/embeddings/LayerNorm/beta", + "embeddings_project.weight": "electra/embeddings_project/kernel", + "embeddings_project.bias": "electra/embeddings_project/bias", + } + if config.num_groups > 1: + group_dense_name = "g_dense" + else: + group_dense_name = "dense" + + for j in range(config.num_hidden_layers): + param_mapping[ + f"encoder.layer.{j}.attention.self.query.weight" + ] = f"electra/encoder/layer_{j}/attention/self/query/kernel" + param_mapping[ + f"encoder.layer.{j}.attention.self.query.bias" + ] = f"electra/encoder/layer_{j}/attention/self/query/bias" + param_mapping[ + f"encoder.layer.{j}.attention.self.key.weight" + ] = f"electra/encoder/layer_{j}/attention/self/key/kernel" + param_mapping[ + f"encoder.layer.{j}.attention.self.key.bias" + ] = f"electra/encoder/layer_{j}/attention/self/key/bias" + param_mapping[ + f"encoder.layer.{j}.attention.self.value.weight" + ] = f"electra/encoder/layer_{j}/attention/self/value/kernel" + param_mapping[ + f"encoder.layer.{j}.attention.self.value.bias" + ] = f"electra/encoder/layer_{j}/attention/self/value/bias" + param_mapping[ + f"encoder.layer.{j}.attention.self.key_conv_attn_layer.depthwise.weight" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/depthwise_kernel" + param_mapping[ + 
f"encoder.layer.{j}.attention.self.key_conv_attn_layer.pointwise.weight" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/pointwise_kernel" + param_mapping[ + f"encoder.layer.{j}.attention.self.key_conv_attn_layer.bias" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_key/bias" + param_mapping[ + f"encoder.layer.{j}.attention.self.conv_kernel_layer.weight" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/kernel" + param_mapping[ + f"encoder.layer.{j}.attention.self.conv_kernel_layer.bias" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/bias" + param_mapping[ + f"encoder.layer.{j}.attention.self.conv_out_layer.weight" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_point/kernel" + param_mapping[ + f"encoder.layer.{j}.attention.self.conv_out_layer.bias" + ] = f"electra/encoder/layer_{j}/attention/self/conv_attn_point/bias" + param_mapping[ + f"encoder.layer.{j}.attention.output.dense.weight" + ] = f"electra/encoder/layer_{j}/attention/output/dense/kernel" + param_mapping[ + f"encoder.layer.{j}.attention.output.LayerNorm.weight" + ] = f"electra/encoder/layer_{j}/attention/output/LayerNorm/gamma" + param_mapping[ + f"encoder.layer.{j}.attention.output.dense.bias" + ] = f"electra/encoder/layer_{j}/attention/output/dense/bias" + param_mapping[ + f"encoder.layer.{j}.attention.output.LayerNorm.bias" + ] = f"electra/encoder/layer_{j}/attention/output/LayerNorm/beta" + param_mapping[ + f"encoder.layer.{j}.intermediate.dense.weight" + ] = f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/kernel" + param_mapping[ + f"encoder.layer.{j}.intermediate.dense.bias" + ] = f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/bias" + param_mapping[ + f"encoder.layer.{j}.output.dense.weight" + ] = f"electra/encoder/layer_{j}/output/{group_dense_name}/kernel" + param_mapping[ + f"encoder.layer.{j}.output.dense.bias" + ] = f"electra/encoder/layer_{j}/output/{group_dense_name}/bias" + param_mapping[ + f"encoder.layer.{j}.output.LayerNorm.weight" + ] = f"electra/encoder/layer_{j}/output/LayerNorm/gamma" + param_mapping[f"encoder.layer.{j}.output.LayerNorm.bias"] = f"electra/encoder/layer_{j}/output/LayerNorm/beta" + + for param in model.named_parameters(): + param_name = param[0] + retriever = attrgetter(param_name) + result = retriever(model) + tf_name = param_mapping[param_name] + value = torch.from_numpy(tf_data[tf_name]) + logger.info(f"TF: {tf_name}, PT: {param_name} ") + if tf_name.endswith("/kernel"): + if not tf_name.endswith("/intermediate/g_dense/kernel"): + if not tf_name.endswith("/output/g_dense/kernel"): + value = value.T + if tf_name.endswith("/depthwise_kernel"): + value = value.permute(1, 2, 0) # 2, 0, 1 + if tf_name.endswith("/pointwise_kernel"): + value = value.permute(2, 1, 0) # 2, 1, 0 + if tf_name.endswith("/conv_attn_key/bias"): + value = value.unsqueeze(-1) + result.data = value + return model + + +class ConvBertEmbeddings(nn.Module): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config): + super().__init__() + self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) + self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) + self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) + + # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load + # any TensorFlow checkpoint file + 
self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + # position_ids (1, len position emb) is contiguous in memory and exported when serialized + self.register_buffer( + "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False + ) + self.register_buffer( + "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False + ) + + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + ) -> torch.LongTensor: + if input_ids is not None: + input_shape = input_ids.size() + else: + input_shape = inputs_embeds.size()[:-1] + + seq_length = input_shape[1] + + if position_ids is None: + position_ids = self.position_ids[:, :seq_length] + + # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs + # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves + # issue #5664 + if token_type_ids is None: + if hasattr(self, "token_type_ids"): + buffered_token_type_ids = self.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) + + if inputs_embeds is None: + inputs_embeds = self.word_embeddings(input_ids) + position_embeddings = self.position_embeddings(position_ids) + token_type_embeddings = self.token_type_embeddings(token_type_ids) + + embeddings = inputs_embeds + position_embeddings + token_type_embeddings + embeddings = self.LayerNorm(embeddings) + embeddings = self.dropout(embeddings) + return embeddings + + +class ConvBertPreTrainedModel(PreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ConvBertConfig + load_tf_weights = load_tf_weights_in_convbert + base_model_prefix = "convbert" + supports_gradient_checkpointing = True + + def _init_weights(self, module): + """Initialize the weights""" + if isinstance(module, nn.Linear): + # Slightly different from the TF version which uses truncated_normal for initialization + # cf https://github.com/pytorch/pytorch/pull/5617 + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.bias is not None: + module.bias.data.zero_() + elif isinstance(module, nn.Embedding): + module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) + if module.padding_idx is not None: + module.weight.data[module.padding_idx].zero_() + elif isinstance(module, nn.LayerNorm): + module.bias.data.zero_() + module.weight.data.fill_(1.0) + + +class SeparableConv1D(nn.Module): + """This class implements separable convolution, i.e. 
a depthwise and a pointwise layer""" + + def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs): + super().__init__() + self.depthwise = nn.Conv1d( + input_filters, + input_filters, + kernel_size=kernel_size, + groups=input_filters, + padding=kernel_size // 2, + bias=False, + ) + self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False) + self.bias = nn.Parameter(torch.zeros(output_filters, 1)) + + self.depthwise.weight.data.normal_(mean=0.0, std=config.initializer_range) + self.pointwise.weight.data.normal_(mean=0.0, std=config.initializer_range) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + x = self.depthwise(hidden_states) + x = self.pointwise(x) + x += self.bias + return x + + +class ConvBertSelfAttention(nn.Module): + def __init__(self, config): + super().__init__() + if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + new_num_attention_heads = config.num_attention_heads // config.head_ratio + if new_num_attention_heads < 1: + self.head_ratio = config.num_attention_heads + self.num_attention_heads = 1 + else: + self.num_attention_heads = new_num_attention_heads + self.head_ratio = config.head_ratio + + self.conv_kernel_size = config.conv_kernel_size + if config.hidden_size % self.num_attention_heads != 0: + raise ValueError("hidden_size should be divisible by num_attention_heads") + + self.attention_head_size = (config.hidden_size // self.num_attention_heads) // 2 + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = nn.Linear(config.hidden_size, self.all_head_size) + self.key = nn.Linear(config.hidden_size, self.all_head_size) + self.value = nn.Linear(config.hidden_size, self.all_head_size) + + self.key_conv_attn_layer = SeparableConv1D( + config, config.hidden_size, self.all_head_size, self.conv_kernel_size + ) + self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size) + self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size) + + self.unfold = nn.Unfold( + kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0] + ) + + self.dropout = nn.Dropout(config.attention_probs_dropout_prob) + + def transpose_for_scores(self, x): + new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) + x = x.view(*new_x_shape) + return x.permute(0, 2, 1, 3) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: + mixed_query_layer = self.query(hidden_states) + batch_size = hidden_states.size(0) + # If this is instantiated as a cross-attention module, the keys + # and values come from an encoder; the attention mask needs to be + # such that the encoder's padding tokens are not attended to. 
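+ # ConvBERT mixes two branches in this method: the (reduced) set of attention heads runs standard
+ # softmax self-attention, while the span-based dynamic convolution branch derives per-position
+ # kernels from a separable convolution of the hidden states multiplied element-wise with the query
+ # projections, normalizes those kernels over the kernel positions, and applies them to local spans
+ # of the `conv_out_layer` values. The outputs of the two branches are concatenated at the end.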
+ if encoder_hidden_states is not None: + mixed_key_layer = self.key(encoder_hidden_states) + mixed_value_layer = self.value(encoder_hidden_states) + else: + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2)) + mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer) + + conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer) + conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1]) + conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1) + + conv_out_layer = self.conv_out_layer(hidden_states) + conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size]) + conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1) + conv_out_layer = nn.functional.unfold( + conv_out_layer, + kernel_size=[self.conv_kernel_size, 1], + dilation=1, + padding=[(self.conv_kernel_size - 1) // 2, 0], + stride=1, + ) + conv_out_layer = conv_out_layer.transpose(1, 2).reshape( + batch_size, -1, self.all_head_size, self.conv_kernel_size + ) + conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size]) + conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer) + conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size]) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in ConvBertModel forward() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = nn.functional.softmax(attention_scores, dim=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + context_layer = torch.matmul(attention_probs, value_layer) + context_layer = context_layer.permute(0, 2, 1, 3).contiguous() + + conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]) + context_layer = torch.cat([context_layer, conv_out], 2) + + # conv and context + new_context_layer_shape = context_layer.size()[:-2] + ( + self.num_attention_heads * self.attention_head_size * 2, + ) + context_layer = context_layer.view(*new_context_layer_shape) + + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + return outputs + + +class ConvBertSelfOutput(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class ConvBertAttention(nn.Module): + def __init__(self, config): + super().__init__() + self.self = ConvBertSelfAttention(config) + self.output = ConvBertSelfOutput(config) + self.pruned_heads = set() + + def prune_heads(self, heads): + if len(heads) == 0: + return + heads, index = find_pruneable_heads_and_indices( + heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads + ) + + # Prune linear layers + self.self.query = prune_linear_layer(self.self.query, index) + self.self.key = prune_linear_layer(self.self.key, index) + self.self.value = prune_linear_layer(self.self.value, index) + self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) + + # Update hyper params and store pruned heads + self.self.num_attention_heads = self.self.num_attention_heads - len(heads) + self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads + self.pruned_heads = self.pruned_heads.union(heads) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]: + self_outputs = self.self( + hidden_states, + attention_mask, + head_mask, + encoder_hidden_states, + output_attentions, + ) + attention_output = self.output(self_outputs[0], hidden_states) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + return outputs + + +class GroupedLinearLayer(nn.Module): + def __init__(self, input_size, output_size, num_groups): + super().__init__() + self.input_size = input_size + self.output_size = output_size + self.num_groups = num_groups + self.group_in_dim = self.input_size // self.num_groups + self.group_out_dim = self.output_size // self.num_groups + self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim)) + self.bias = nn.Parameter(torch.empty(output_size)) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + batch_size = list(hidden_states.size())[0] + x = torch.reshape(hidden_states, [-1, 
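+ # Grouped linear layer: the feature dimension is split into `num_groups` blocks and each block is
+ # projected with its own `(group_in_dim, group_out_dim)` weight through a single batched matmul,
+ # which uses a factor of `num_groups` fewer parameters than a full `input_size x output_size` projection.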
self.num_groups, self.group_in_dim]) + x = x.permute(1, 0, 2) + x = torch.matmul(x, self.weight) + x = x.permute(1, 0, 2) + x = torch.reshape(x, [batch_size, -1, self.output_size]) + x = x + self.bias + return x + + +class ConvBertIntermediate(nn.Module): + def __init__(self, config): + super().__init__() + if config.num_groups == 1: + self.dense = nn.Linear(config.hidden_size, config.intermediate_size) + else: + self.dense = GroupedLinearLayer( + input_size=config.hidden_size, output_size=config.intermediate_size, num_groups=config.num_groups + ) + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = ACT2FN[config.hidden_act] + else: + self.intermediate_act_fn = config.hidden_act + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + return hidden_states + + +class ConvBertOutput(nn.Module): + def __init__(self, config): + super().__init__() + if config.num_groups == 1: + self.dense = nn.Linear(config.intermediate_size, config.hidden_size) + else: + self.dense = GroupedLinearLayer( + input_size=config.intermediate_size, output_size=config.hidden_size, num_groups=config.num_groups + ) + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.dropout = nn.Dropout(config.hidden_dropout_prob) + + def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + return hidden_states + + +class ConvBertLayer(nn.Module): + def __init__(self, config): + super().__init__() + self.chunk_size_feed_forward = config.chunk_size_feed_forward + self.seq_len_dim = 1 + self.attention = ConvBertAttention(config) + self.is_decoder = config.is_decoder + self.add_cross_attention = config.add_cross_attention + if self.add_cross_attention: + if not self.is_decoder: + raise TypeError(f"{self} should be used as a decoder model if cross attention is added") + self.crossattention = ConvBertAttention(config) + self.intermediate = ConvBertIntermediate(config) + self.output = ConvBertOutput(config) + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]: + self_attention_outputs = self.attention( + hidden_states, + attention_mask, + head_mask, + output_attentions=output_attentions, + ) + attention_output = self_attention_outputs[0] + outputs = self_attention_outputs[1:] # add self attentions if we output attention weights + + if self.is_decoder and encoder_hidden_states is not None: + if not hasattr(self, "crossattention"): + raise AttributeError( + f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" + " by setting `config.add_cross_attention=True`" + ) + cross_attention_outputs = self.crossattention( + attention_output, + encoder_attention_mask, + head_mask, + encoder_hidden_states, + output_attentions, + ) + attention_output = cross_attention_outputs[0] + outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights + + layer_output = apply_chunking_to_forward( + 
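+ # `apply_chunking_to_forward` applies `feed_forward_chunk` to slices of the sequence dimension of
+ # size `chunk_size_feed_forward` and concatenates the results, trading a little speed for lower
+ # peak memory; with a chunk size of 0 it simply calls the function on the full tensor.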
self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output + ) + outputs = (layer_output,) + outputs + return outputs + + def feed_forward_chunk(self, attention_output): + intermediate_output = self.intermediate(attention_output) + layer_output = self.output(intermediate_output, attention_output) + return layer_output + + +class ConvBertEncoder(nn.Module): + def __init__(self, config): + super().__init__() + self.config = config + self.layer = nn.ModuleList([ConvBertLayer(config) for _ in range(config.num_hidden_layers)]) + self.gradient_checkpointing = False + + def forward( + self, + hidden_states: torch.Tensor, + attention_mask: Optional[torch.FloatTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = False, + output_hidden_states: Optional[bool] = False, + return_dict: Optional[bool] = True, + ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: + all_hidden_states = () if output_hidden_states else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_head_mask = head_mask[i] if head_mask is not None else None + + if self.gradient_checkpointing and self.training: + layer_outputs = self._gradient_checkpointing_func( + layer_module.__call__, + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions, + ) + else: + layer_outputs = layer_module( + hidden_states, + attention_mask, + layer_head_mask, + encoder_hidden_states, + encoder_attention_mask, + output_attentions, + ) + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attentions = all_self_attentions + (layer_outputs[1],) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + (layer_outputs[2],) + + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple( + v + for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions] + if v is not None + ) + return BaseModelOutputWithCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) + + +class ConvBertPredictionHeadTransform(nn.Module): + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + if isinstance(config.hidden_act, str): + self.transform_act_fn = ACT2FN[config.hidden_act] + else: + self.transform_act_fn = config.hidden_act + self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + + def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + return hidden_states + + +CONVBERT_START_DOCSTRING = r""" + This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use + it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and + behavior. 
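+
+ Example (a minimal usage sketch; it assumes the public `YituTech/conv-bert-base` checkpoint referenced in this file is reachable):
+
+ >>> from transformers import AutoTokenizer, ConvBertModel
+ >>> tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
+ >>> model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
+ >>> inputs = tokenizer("ConvBERT mixes self-attention with convolution.", return_tensors="pt")
+ >>> outputs = model(**inputs)
+ >>> outputs.last_hidden_state.shape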
+ + Parameters: + config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CONVBERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`torch.LongTensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and + [`PreTrainedTokenizer.__call__`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`torch.LongTensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert *input_ids* indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned + tensors for more detail. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. +""" + + +@add_start_docstrings( + "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.", + CONVBERT_START_DOCSTRING, +) +class ConvBertModel(ConvBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.embeddings = ConvBertEmbeddings(config) + + if config.embedding_size != config.hidden_size: + self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size) + + self.encoder = ConvBertEncoder(config) + self.config = config + # Initialize weights and apply final processing + self.post_init() + + def get_input_embeddings(self): + return self.embeddings.word_embeddings + + def set_input_embeddings(self, value): + self.embeddings.word_embeddings = value + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + for layer, heads in heads_to_prune.items(): + self.encoder.layer[layer].attention.prune_heads(heads) + + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=BaseModelOutputWithCrossAttentions, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = ( + output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states + ) + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) + input_shape = input_ids.size() + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + batch_size, seq_length = input_shape + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if attention_mask is None: + attention_mask = torch.ones(input_shape, device=device) + if token_type_ids is None: + if hasattr(self.embeddings, "token_type_ids"): + buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] + buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) + token_type_ids = buffered_token_type_ids_expanded + else: + token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) + + extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) + head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) + + hidden_states = self.embeddings( + input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds + ) + + if hasattr(self, "embeddings_project"): + hidden_states = self.embeddings_project(hidden_states) + + hidden_states = self.encoder( + hidden_states, + attention_mask=extended_attention_mask, + head_mask=head_mask, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + return hidden_states + + +class ConvBertGeneratorPredictions(nn.Module): + """Prediction module for the generator, made up of two dense layers.""" + + def __init__(self, config): + super().__init__() + + self.activation = get_activation("gelu") + self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) + self.dense = nn.Linear(config.hidden_size, config.embedding_size) + + def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor: + hidden_states = self.dense(generator_hidden_states) + 
hidden_states = self.activation(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + + return hidden_states + + +@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING) +class ConvBertForMaskedLM(ConvBertPreTrainedModel): + _tied_weights_keys = ["generator.lm_head.weight"] + + def __init__(self, config): + super().__init__(config) + + self.convbert = ConvBertModel(config) + self.generator_predictions = ConvBertGeneratorPredictions(config) + + self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) + # Initialize weights and apply final processing + self.post_init() + + def get_output_embeddings(self): + return self.generator_lm_head + + def set_output_embeddings(self, word_embeddings): + self.generator_lm_head = word_embeddings + + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MaskedLMOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + generator_hidden_states = self.convbert( + input_ids, + attention_mask, + token_type_ids, + position_ids, + head_mask, + inputs_embeds, + output_attentions, + output_hidden_states, + return_dict, + ) + generator_sequence_output = generator_hidden_states[0] + + prediction_scores = self.generator_predictions(generator_sequence_output) + prediction_scores = self.generator_lm_head(prediction_scores) + + loss = None + # Masked language modeling softmax layer + if labels is not None: + loss_fct = nn.CrossEntropyLoss() # -100 index = padding token + loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) + + if not return_dict: + output = (prediction_scores,) + generator_hidden_states[1:] + return ((loss,) + output) if loss is not None else output + + return MaskedLMOutput( + loss=loss, + logits=prediction_scores, + hidden_states=generator_hidden_states.hidden_states, + attentions=generator_hidden_states.attentions, + ) + + +class ConvBertClassificationHead(nn.Module): + """Head for sentence-level classification tasks.""" + + def __init__(self, config): + super().__init__() + self.dense = nn.Linear(config.hidden_size, config.hidden_size) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.out_proj = nn.Linear(config.hidden_size, config.num_labels) + + self.config = config + + def forward(self, 
hidden_states: torch.Tensor, **kwargs) -> torch.Tensor: + x = hidden_states[:, 0, :] # take the first token (equivalent to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = ACT2FN[self.config.hidden_act](x) + x = self.dropout(x) + x = self.out_proj(x) + return x + + +@add_start_docstrings( + """ + ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the + pooled output) e.g. for GLUE tasks. + """, + CONVBERT_START_DOCSTRING, +) +class ConvBertForSequenceClassification(ConvBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + self.config = config + self.convbert = ConvBertModel(config) + self.classifier = ConvBertClassificationHead(config) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=SequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, SequenceClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if + `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
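+
+ Example (an illustrative sketch only; the checkpoint has no fine-tuned classification head, so `num_labels` and the label value below are placeholders):
+
+ >>> import torch
+ >>> from transformers import AutoTokenizer, ConvBertForSequenceClassification
+ >>> tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
+ >>> model = ConvBertForSequenceClassification.from_pretrained("YituTech/conv-bert-base", num_labels=2)
+ >>> inputs = tokenizer("This film was surprisingly good.", return_tensors="pt")
+ >>> outputs = model(**inputs, labels=torch.tensor([1]))
+ >>> outputs.loss, outputs.logits.shape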
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + if self.config.problem_type is None: + if self.num_labels == 1: + self.config.problem_type = "regression" + elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): + self.config.problem_type = "single_label_classification" + else: + self.config.problem_type = "multi_label_classification" + + if self.config.problem_type == "regression": + loss_fct = MSELoss() + if self.num_labels == 1: + loss = loss_fct(logits.squeeze(), labels.squeeze()) + else: + loss = loss_fct(logits, labels) + elif self.config.problem_type == "single_label_classification": + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + elif self.config.problem_type == "multi_label_classification": + loss_fct = BCEWithLogitsLoss() + loss = loss_fct(logits, labels) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return SequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. + """, + CONVBERT_START_DOCSTRING, +) +class ConvBertForMultipleChoice(ConvBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.convbert = ConvBertModel(config) + self.sequence_summary = SequenceSummary(config) + self.classifier = nn.Linear(config.hidden_size, 1) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward( + CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=MultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, MultipleChoiceModelOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., + num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See + `input_ids` above) + """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] + + input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None + attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None + token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None + position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None + inputs_embeds = ( + inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) + if inputs_embeds is not None + else None + ) + + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + pooled_output = self.sequence_summary(sequence_output) + logits = self.classifier(pooled_output) + reshaped_logits = logits.view(-1, num_choices) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(reshaped_logits, labels) + + if not return_dict: + output = (reshaped_logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return MultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + CONVBERT_START_DOCSTRING, +) +class ConvBertForTokenClassification(ConvBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + self.num_labels = config.num_labels + + self.convbert = ConvBertModel(config) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = nn.Dropout(classifier_dropout) + self.classifier = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + labels: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, TokenClassifierOutput]: + r""" + labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + sequence_output = self.dropout(sequence_output) + logits = self.classifier(sequence_output) + + loss = None + if labels is not None: + loss_fct = CrossEntropyLoss() + loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + +@add_start_docstrings( + """ + ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layers on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + CONVBERT_START_DOCSTRING, +) +class ConvBertForQuestionAnswering(ConvBertPreTrainedModel): + def __init__(self, config): + super().__init__(config) + + self.num_labels = config.num_labels + self.convbert = ConvBertModel(config) + self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) + + # Initialize weights and apply final processing + self.post_init() + + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=QuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + start_positions: Optional[torch.LongTensor] = None, + end_positions: Optional[torch.LongTensor] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, QuestionAnsweringModelOutput]: + r""" + start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + sequence_output = outputs[0] + + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = logits.split(1, dim=-1) + start_logits = start_logits.squeeze(-1).contiguous() + end_logits = end_logits.squeeze(-1).contiguous() + + total_loss = None + if start_positions is not None and end_positions is not None: + # If we are on multi-GPU, split add a dimension + if len(start_positions.size()) > 1: + start_positions = start_positions.squeeze(-1) + if len(end_positions.size()) > 1: + end_positions = end_positions.squeeze(-1) + # sometimes the start/end positions are outside our model inputs, we ignore these terms + ignored_index = start_logits.size(1) + start_positions = start_positions.clamp(0, ignored_index) + end_positions = end_positions.clamp(0, ignored_index) + + loss_fct = CrossEntropyLoss(ignore_index=ignored_index) + start_loss = loss_fct(start_logits, start_positions) + end_loss = loss_fct(end_logits, end_positions) + total_loss = (start_loss + end_loss) / 2 + + if not return_dict: + output = (start_logits, end_logits) + outputs[1:] + return ((total_loss,) + output) if total_loss is not None else output + + return QuestionAnsweringModelOutput( + loss=total_loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py new file mode 100644 index 0000000000000000000000000000000000000000..7206b3558ace8a26994d9608d3963ee6f34f1e91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/convbert/modeling_tf_convbert.py @@ -0,0 +1,1468 @@ +# coding=utf-8 +# Copyright 2021 The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" TF 2.0 ConvBERT model.""" + + +from __future__ import annotations + +from typing import Optional, Tuple, Union + +import numpy as np +import tensorflow as tf + +from ...activations_tf import get_tf_activation +from ...modeling_tf_outputs import ( + TFBaseModelOutput, + TFMaskedLMOutput, + TFMultipleChoiceModelOutput, + TFQuestionAnsweringModelOutput, + TFSequenceClassifierOutput, + TFTokenClassifierOutput, +) +from ...modeling_tf_utils import ( + TFMaskedLanguageModelingLoss, + TFModelInputType, + TFMultipleChoiceLoss, + TFPreTrainedModel, + TFQuestionAnsweringLoss, + TFSequenceClassificationLoss, + TFSequenceSummary, + TFTokenClassificationLoss, + get_initializer, + keras, + keras_serializable, + unpack_inputs, +) +from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax +from ...utils import ( + add_code_sample_docstrings, + add_start_docstrings, + add_start_docstrings_to_model_forward, + logging, +) +from .configuration_convbert import ConvBertConfig + + +logger = logging.get_logger(__name__) + +_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base" +_CONFIG_FOR_DOC = "ConvBertConfig" + + +from ..deprecated._archive_maps import TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402 + + +# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert +class TFConvBertEmbeddings(keras.layers.Layer): + """Construct the embeddings from word, position and token_type embeddings.""" + + def __init__(self, config: ConvBertConfig, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.embedding_size = config.embedding_size + self.max_position_embeddings = config.max_position_embeddings + self.initializer_range = config.initializer_range + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob) + + def build(self, input_shape=None): + with tf.name_scope("word_embeddings"): + self.weight = self.add_weight( + name="weight", + shape=[self.config.vocab_size, self.embedding_size], + initializer=get_initializer(self.initializer_range), + ) + + with tf.name_scope("token_type_embeddings"): + self.token_type_embeddings = self.add_weight( + name="embeddings", + shape=[self.config.type_vocab_size, self.embedding_size], + initializer=get_initializer(self.initializer_range), + ) + + with tf.name_scope("position_embeddings"): + self.position_embeddings = self.add_weight( + name="embeddings", + shape=[self.max_position_embeddings, self.embedding_size], + initializer=get_initializer(self.initializer_range), + ) + + if self.built: + return + self.built = True + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.embedding_size]) + + # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call + def call( + self, + input_ids: tf.Tensor = None, + position_ids: tf.Tensor = None, + token_type_ids: tf.Tensor = None, + inputs_embeds: tf.Tensor = None, + past_key_values_length=0, + training: bool = False, + ) -> tf.Tensor: + """ + Applies embedding based on inputs tensor. + + Returns: + final_embeddings (`tf.Tensor`): output embedding tensor. 
+ """ + if input_ids is None and inputs_embeds is None: + raise ValueError("Need to provide either `input_ids` or `input_embeds`.") + + if input_ids is not None: + check_embeddings_within_bounds(input_ids, self.config.vocab_size) + inputs_embeds = tf.gather(params=self.weight, indices=input_ids) + + input_shape = shape_list(inputs_embeds)[:-1] + + if token_type_ids is None: + token_type_ids = tf.fill(dims=input_shape, value=0) + + if position_ids is None: + position_ids = tf.expand_dims( + tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0 + ) + + position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) + token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) + final_embeddings = inputs_embeds + position_embeds + token_type_embeds + final_embeddings = self.LayerNorm(inputs=final_embeddings) + final_embeddings = self.dropout(inputs=final_embeddings, training=training) + + return final_embeddings + + +class TFConvBertSelfAttention(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + if config.hidden_size % config.num_attention_heads != 0: + raise ValueError( + f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " + f"heads ({config.num_attention_heads})" + ) + + new_num_attention_heads = int(config.num_attention_heads / config.head_ratio) + if new_num_attention_heads < 1: + self.head_ratio = config.num_attention_heads + num_attention_heads = 1 + else: + num_attention_heads = new_num_attention_heads + self.head_ratio = config.head_ratio + + self.num_attention_heads = num_attention_heads + self.conv_kernel_size = config.conv_kernel_size + + if config.hidden_size % self.num_attention_heads != 0: + raise ValueError("hidden_size should be divisible by num_attention_heads") + + self.attention_head_size = config.hidden_size // config.num_attention_heads + self.all_head_size = self.num_attention_heads * self.attention_head_size + self.query = keras.layers.Dense( + self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query" + ) + self.key = keras.layers.Dense( + self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key" + ) + self.value = keras.layers.Dense( + self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value" + ) + + self.key_conv_attn_layer = keras.layers.SeparableConv1D( + self.all_head_size, + self.conv_kernel_size, + padding="same", + activation=None, + depthwise_initializer=get_initializer(1 / self.conv_kernel_size), + pointwise_initializer=get_initializer(config.initializer_range), + name="key_conv_attn_layer", + ) + + self.conv_kernel_layer = keras.layers.Dense( + self.num_attention_heads * self.conv_kernel_size, + activation=None, + name="conv_kernel_layer", + kernel_initializer=get_initializer(config.initializer_range), + ) + + self.conv_out_layer = keras.layers.Dense( + self.all_head_size, + activation=None, + name="conv_out_layer", + kernel_initializer=get_initializer(config.initializer_range), + ) + + self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob) + self.config = config + + def transpose_for_scores(self, x, batch_size): + # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] + x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) + return tf.transpose(x, perm=[0, 2, 1, 3]) 
+ + def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): + batch_size = shape_list(hidden_states)[0] + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) + key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) + conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer) + + conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer) + conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1]) + conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1) + + paddings = tf.constant( + [ + [ + 0, + 0, + ], + [int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)], + [0, 0], + ] + ) + + conv_out_layer = self.conv_out_layer(hidden_states) + conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size]) + conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT") + + unfold_conv_out_layer = tf.stack( + [ + tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size]) + for i in range(self.conv_kernel_size) + ], + axis=-1, + ) + + conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size]) + + conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer) + conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size]) + + # Take the dot product between "query" and "key" to get the raw attention scores. + attention_scores = tf.matmul( + query_layer, key_layer, transpose_b=True + ) # (batch size, num_heads, seq_len_q, seq_len_k) + dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores + attention_scores = attention_scores / tf.math.sqrt(dk) + + if attention_mask is not None: + # Apply the attention mask (precomputed for all layers in TFBertModel call() function) + attention_scores = attention_scores + attention_mask + + # Normalize the attention scores to probabilities. + attention_probs = stable_softmax(attention_scores, axis=-1) + + # This is actually dropping out entire tokens to attend to, which might + # seem a bit unusual, but is taken from the original Transformer paper.
+ attention_probs = self.dropout(attention_probs, training=training) + + # Mask heads if we want to + if head_mask is not None: + attention_probs = attention_probs * head_mask + + value_layer = tf.reshape( + mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size] + ) + value_layer = tf.transpose(value_layer, [0, 2, 1, 3]) + + context_layer = tf.matmul(attention_probs, value_layer) + context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) + + conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]) + context_layer = tf.concat([context_layer, conv_out], 2) + context_layer = tf.reshape( + context_layer, (batch_size, -1, self.head_ratio * self.all_head_size) + ) # (batch_size, seq_len_q, all_head_size) + outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "query", None) is not None: + with tf.name_scope(self.query.name): + self.query.build([None, None, self.config.hidden_size]) + if getattr(self, "key", None) is not None: + with tf.name_scope(self.key.name): + self.key.build([None, None, self.config.hidden_size]) + if getattr(self, "value", None) is not None: + with tf.name_scope(self.value.name): + self.value.build([None, None, self.config.hidden_size]) + if getattr(self, "key_conv_attn_layer", None) is not None: + with tf.name_scope(self.key_conv_attn_layer.name): + self.key_conv_attn_layer.build([None, None, self.config.hidden_size]) + if getattr(self, "conv_kernel_layer", None) is not None: + with tf.name_scope(self.conv_kernel_layer.name): + self.conv_kernel_layer.build([None, None, self.all_head_size]) + if getattr(self, "conv_out_layer", None) is not None: + with tf.name_scope(self.conv_out_layer.name): + self.conv_out_layer.build([None, None, self.config.hidden_size]) + + +class TFConvBertSelfOutput(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) + self.config = config + + def call(self, hidden_states, input_tensor, training=False): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +class TFConvBertAttention(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.self_attention = TFConvBertSelfAttention(config, name="self") + self.dense_output = TFConvBertSelfOutput(config, name="output") + + def prune_heads(self, heads): + raise NotImplementedError + + def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False): + self_outputs = self.self_attention( + input_tensor, attention_mask, head_mask, 
output_attentions, training=training + ) + attention_output = self.dense_output(self_outputs[0], input_tensor, training=training) + outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "self_attention", None) is not None: + with tf.name_scope(self.self_attention.name): + self.self_attention.build(None) + if getattr(self, "dense_output", None) is not None: + with tf.name_scope(self.dense_output.name): + self.dense_output.build(None) + + +class GroupedLinearLayer(keras.layers.Layer): + def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs): + super().__init__(**kwargs) + self.input_size = input_size + self.output_size = output_size + self.num_groups = num_groups + self.kernel_initializer = kernel_initializer + self.group_in_dim = self.input_size // self.num_groups + self.group_out_dim = self.output_size // self.num_groups + + def build(self, input_shape=None): + self.kernel = self.add_weight( + "kernel", + shape=[self.group_out_dim, self.group_in_dim, self.num_groups], + initializer=self.kernel_initializer, + trainable=True, + ) + + self.bias = self.add_weight( + "bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True + ) + super().build(input_shape) + + def call(self, hidden_states): + batch_size = shape_list(hidden_states)[0] + x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2]) + x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0])) + x = tf.transpose(x, [1, 0, 2]) + x = tf.reshape(x, [batch_size, -1, self.output_size]) + x = tf.nn.bias_add(value=x, bias=self.bias) + return x + + +class TFConvBertIntermediate(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + if config.num_groups == 1: + self.dense = keras.layers.Dense( + config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + else: + self.dense = GroupedLinearLayer( + config.hidden_size, + config.intermediate_size, + num_groups=config.num_groups, + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + + if isinstance(config.hidden_act, str): + self.intermediate_act_fn = get_tf_activation(config.hidden_act) + else: + self.intermediate_act_fn = config.hidden_act + self.config = config + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.intermediate_act_fn(hidden_states) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + + +class TFConvBertOutput(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + if config.num_groups == 1: + self.dense = keras.layers.Dense( + config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + else: + self.dense = GroupedLinearLayer( + config.intermediate_size, + config.hidden_size, + num_groups=config.num_groups, + kernel_initializer=get_initializer(config.initializer_range), + name="dense", + ) + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dropout = keras.layers.Dropout(config.hidden_dropout_prob) + self.config = config + + def 
call(self, hidden_states, input_tensor, training=False): + hidden_states = self.dense(hidden_states) + hidden_states = self.dropout(hidden_states, training=training) + hidden_states = self.LayerNorm(hidden_states + input_tensor) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.intermediate_size]) + + +class TFConvBertLayer(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.attention = TFConvBertAttention(config, name="attention") + self.intermediate = TFConvBertIntermediate(config, name="intermediate") + self.bert_output = TFConvBertOutput(config, name="output") + + def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False): + attention_outputs = self.attention( + hidden_states, attention_mask, head_mask, output_attentions, training=training + ) + attention_output = attention_outputs[0] + intermediate_output = self.intermediate(attention_output) + layer_output = self.bert_output(intermediate_output, attention_output, training=training) + outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "attention", None) is not None: + with tf.name_scope(self.attention.name): + self.attention.build(None) + if getattr(self, "intermediate", None) is not None: + with tf.name_scope(self.intermediate.name): + self.intermediate.build(None) + if getattr(self, "bert_output", None) is not None: + with tf.name_scope(self.bert_output.name): + self.bert_output.build(None) + + +class TFConvBertEncoder(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)] + + def call( + self, + hidden_states, + attention_mask, + head_mask, + output_attentions, + output_hidden_states, + return_dict, + training=False, + ): + all_hidden_states = () if output_hidden_states else None + all_attentions = () if output_attentions else None + + for i, layer_module in enumerate(self.layer): + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + layer_outputs = layer_module( + hidden_states, attention_mask, head_mask[i], output_attentions, training=training + ) + hidden_states = layer_outputs[0] + + if output_attentions: + all_attentions = all_attentions + (layer_outputs[1],) + + # Add last layer + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states,) + + if not return_dict: + return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) + + return TFBaseModelOutput( + last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "layer", None) is not None: + for layer in self.layer: + with tf.name_scope(layer.name): + layer.build(None) + + +class TFConvBertPredictionHeadTransform(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.dense = 
keras.layers.Dense( + config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + + if isinstance(config.hidden_act, str): + self.transform_act_fn = get_tf_activation(config.hidden_act) + else: + self.transform_act_fn = config.hidden_act + + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.config = config + + def call(self, hidden_states): + hidden_states = self.dense(hidden_states) + hidden_states = self.transform_act_fn(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.hidden_size]) + + +@keras_serializable +class TFConvBertMainLayer(keras.layers.Layer): + config_class = ConvBertConfig + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.embeddings = TFConvBertEmbeddings(config, name="embeddings") + + if config.embedding_size != config.hidden_size: + self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project") + + self.encoder = TFConvBertEncoder(config, name="encoder") + self.config = config + + def get_input_embeddings(self): + return self.embeddings + + def set_input_embeddings(self, value): + self.embeddings.weight = value + self.embeddings.vocab_size = value.shape[0] + + def _prune_heads(self, heads_to_prune): + """ + Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base + class PreTrainedModel + """ + raise NotImplementedError + + def get_extended_attention_mask(self, attention_mask, input_shape, dtype): + if attention_mask is None: + attention_mask = tf.fill(input_shape, 1) + + # We create a 3D attention mask from a 2D tensor mask. + # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and -10000.0 for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. 
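+        # Casting to the hidden-state dtype keeps the additive -10000.0 bias compatible with the
+        # attention scores (e.g. float16 under mixed precision) before it is added to them.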
+ extended_attention_mask = tf.cast(extended_attention_mask, dtype) + extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 + + return extended_attention_mask + + def get_head_mask(self, head_mask): + if head_mask is not None: + raise NotImplementedError + else: + head_mask = [None] * self.config.num_hidden_layers + + return head_mask + + @unpack_inputs + def call( + self, + input_ids=None, + attention_mask=None, + token_type_ids=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + training=False, + ): + if input_ids is not None and inputs_embeds is not None: + raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") + elif input_ids is not None: + input_shape = shape_list(input_ids) + elif inputs_embeds is not None: + input_shape = shape_list(inputs_embeds)[:-1] + else: + raise ValueError("You have to specify either input_ids or inputs_embeds") + + if attention_mask is None: + attention_mask = tf.fill(input_shape, 1) + + if token_type_ids is None: + token_type_ids = tf.fill(input_shape, 0) + + hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training) + extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype) + head_mask = self.get_head_mask(head_mask) + + if hasattr(self, "embeddings_project"): + hidden_states = self.embeddings_project(hidden_states, training=training) + + hidden_states = self.encoder( + hidden_states, + extended_attention_mask, + head_mask, + output_attentions, + output_hidden_states, + return_dict, + training=training, + ) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "embeddings", None) is not None: + with tf.name_scope(self.embeddings.name): + self.embeddings.build(None) + if getattr(self, "encoder", None) is not None: + with tf.name_scope(self.encoder.name): + self.encoder.build(None) + if getattr(self, "embeddings_project", None) is not None: + with tf.name_scope(self.embeddings_project.name): + self.embeddings_project.build([None, None, self.config.embedding_size]) + + +class TFConvBertPreTrainedModel(TFPreTrainedModel): + """ + An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained + models. + """ + + config_class = ConvBertConfig + base_model_prefix = "convbert" + + +CONVBERT_START_DOCSTRING = r""" + + This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the + library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads + etc.) + + This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it + as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and + behavior. + + + + TensorFlow models and layers in `transformers` accept two formats as input: + + - having all inputs as keyword arguments (like PyTorch models), or + - having all inputs as a list, tuple or dict in the first positional argument. + + The reason the second format is supported is that Keras methods prefer this format when passing inputs to models + and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just + pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second + format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with + the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first + positional argument: + + - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` + - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: + `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` + - a dictionary with one or several input Tensors associated to the input names given in the docstring: + `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` + + Note that when creating models and layers with + [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry + about any of this, as you can just pass inputs like you would to any other Python function! + + + + Args: + config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model. + Initializing with a config file does not load the weights associated with the model, only the + configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. +""" + +CONVBERT_INPUTS_DOCSTRING = r""" + Args: + input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`): + Indices of input sequence tokens in the vocabulary. + + Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and + [`PreTrainedTokenizer.encode`] for details. + + [What are input IDs?](../glossary#input-ids) + attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: + + - 1 for tokens that are **not masked**, + - 0 for tokens that are **masked**. + + [What are attention masks?](../glossary#attention-mask) + token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, + 1]`: + + - 0 corresponds to a *sentence A* token, + - 1 corresponds to a *sentence B* token. + + [What are token type IDs?](../glossary#token-type-ids) + position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*): + Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, + config.max_position_embeddings - 1]`. + + [What are position IDs?](../glossary#position-ids) + head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): + Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: + + - 1 indicates the head is **not masked**, + - 0 indicates the head is **masked**. + + inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*): + Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This + is useful if you want more control over how to convert `input_ids` indices into associated vectors than the + model's internal embedding lookup matrix. + output_attentions (`bool`, *optional*): + Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned + tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the + config will be used instead. + output_hidden_states (`bool`, *optional*): + Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for + more detail. This argument can be used only in eager mode, in graph mode the value in the config will be + used instead. + return_dict (`bool`, *optional*): + Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in + eager mode, in graph mode the value will always be set to True. + training (`bool`, *optional*, defaults to `False`): + Whether or not to use the model in training mode (some modules like dropout modules have different + behaviors between training and evaluation). +""" + + +@add_start_docstrings( + "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.", + CONVBERT_START_DOCSTRING, +) +class TFConvBertModel(TFConvBertPreTrainedModel): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.convbert = TFConvBertMainLayer(config, name="convbert") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFBaseModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: Optional[Union[np.array, tf.Tensor]] = None, + token_type_ids: Optional[Union[np.array, tf.Tensor]] = None, + position_ids: Optional[Union[np.array, tf.Tensor]] = None, + head_mask: Optional[Union[np.array, tf.Tensor]] = None, + inputs_embeds: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + training: bool = False, + ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]: + outputs = self.convbert( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + + return outputs + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convbert", None) is not None: + with tf.name_scope(self.convbert.name): + self.convbert.build(None) + + +class TFConvBertMaskedLMHead(keras.layers.Layer): + def __init__(self, config, input_embeddings, **kwargs): + super().__init__(**kwargs) + + self.config = config + self.embedding_size = config.embedding_size + self.input_embeddings = input_embeddings + + def build(self, input_shape): + self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") + + super().build(input_shape) + + def get_output_embeddings(self): + return self.input_embeddings + + def set_output_embeddings(self, value): + self.input_embeddings.weight = value + self.input_embeddings.vocab_size = shape_list(value)[0] + + def get_bias(self): + return {"bias": self.bias} + + def set_bias(self, value): + self.bias = value["bias"] + self.config.vocab_size = shape_list(value["bias"])[0] + + def call(self, hidden_states): + seq_length = shape_list(tensor=hidden_states)[1] + hidden_states = tf.reshape(tensor=hidden_states, 
shape=[-1, self.embedding_size]) + hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) + hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) + hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) + + return hidden_states + + +class TFConvBertGeneratorPredictions(keras.layers.Layer): + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") + self.dense = keras.layers.Dense(config.embedding_size, name="dense") + self.config = config + + def call(self, generator_hidden_states, training=False): + hidden_states = self.dense(generator_hidden_states) + hidden_states = get_tf_activation("gelu")(hidden_states) + hidden_states = self.LayerNorm(hidden_states) + + return hidden_states + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "LayerNorm", None) is not None: + with tf.name_scope(self.LayerNorm.name): + self.LayerNorm.build([None, None, self.config.embedding_size]) + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING) +class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, **kwargs) + + self.config = config + self.convbert = TFConvBertMainLayer(config, name="convbert") + self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions") + + if isinstance(config.hidden_act, str): + self.activation = get_tf_activation(config.hidden_act) + else: + self.activation = config.hidden_act + + self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head") + + def get_lm_head(self): + return self.generator_lm_head + + def get_prefix_bias_name(self): + return self.name + "/" + self.generator_lm_head.name + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFMaskedLMOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFMaskedLMOutput]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., + config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the + loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` + """ + generator_hidden_states = self.convbert( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + generator_sequence_output = generator_hidden_states[0] + prediction_scores = self.generator_predictions(generator_sequence_output, training=training) + prediction_scores = self.generator_lm_head(prediction_scores, training=training) + loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores) + + if not return_dict: + output = (prediction_scores,) + generator_hidden_states[1:] + + return ((loss,) + output) if loss is not None else output + + return TFMaskedLMOutput( + loss=loss, + logits=prediction_scores, + hidden_states=generator_hidden_states.hidden_states, + attentions=generator_hidden_states.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convbert", None) is not None: + with tf.name_scope(self.convbert.name): + self.convbert.build(None) + if getattr(self, "generator_predictions", None) is not None: + with tf.name_scope(self.generator_predictions.name): + self.generator_predictions.build(None) + if getattr(self, "generator_lm_head", None) is not None: + with tf.name_scope(self.generator_lm_head.name): + self.generator_lm_head.build(None) + + +class TFConvBertClassificationHead(keras.layers.Layer): + """Head for sentence-level classification tasks.""" + + def __init__(self, config, **kwargs): + super().__init__(**kwargs) + + self.dense = keras.layers.Dense( + config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense" + ) + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = keras.layers.Dropout(classifier_dropout) + self.out_proj = keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj" + ) + + self.config = config + + def call(self, hidden_states, **kwargs): + x = hidden_states[:, 0, :] # take token (equiv. to [CLS]) + x = self.dropout(x) + x = self.dense(x) + x = get_tf_activation(self.config.hidden_act)(x) + x = self.dropout(x) + x = self.out_proj(x) + + return x + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "dense", None) is not None: + with tf.name_scope(self.dense.name): + self.dense.build([None, None, self.config.hidden_size]) + if getattr(self, "out_proj", None) is not None: + with tf.name_scope(self.out_proj.name): + self.out_proj.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings( + """ + ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks. 
+ """, + CONVBERT_START_DOCSTRING, +) +class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + self.num_labels = config.num_labels + self.convbert = TFConvBertMainLayer(config, name="convbert") + self.classifier = TFConvBertClassificationHead(config, name="classifier") + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFSequenceClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFSequenceClassifierOutput]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., + config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If + `config.num_labels > 1` a classification loss is computed (Cross-Entropy). + """ + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + logits = self.classifier(outputs[0], training=training) + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[1:] + + return ((loss,) + output) if loss is not None else output + + return TFSequenceClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convbert", None) is not None: + with tf.name_scope(self.convbert.name): + self.convbert.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier.name): + self.classifier.build(None) + + +@add_start_docstrings( + """ + ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a + softmax) e.g. for RocStories/SWAG tasks. 
+ """, + CONVBERT_START_DOCSTRING, +) +class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.convbert = TFConvBertMainLayer(config, name="convbert") + self.sequence_summary = TFSequenceSummary( + config, initializer_range=config.initializer_range, name="sequence_summary" + ) + self.classifier = keras.layers.Dense( + 1, kernel_initializer=get_initializer(config.initializer_range), name="classifier" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward( + CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") + ) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFMultipleChoiceModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFMultipleChoiceModelOutput]: + r""" + labels (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]` + where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) + """ + if input_ids is not None: + num_choices = shape_list(input_ids)[1] + seq_length = shape_list(input_ids)[2] + else: + num_choices = shape_list(inputs_embeds)[1] + seq_length = shape_list(inputs_embeds)[2] + + flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None + flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None + flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None + flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None + flat_inputs_embeds = ( + tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3])) + if inputs_embeds is not None + else None + ) + outputs = self.convbert( + flat_input_ids, + flat_attention_mask, + flat_token_type_ids, + flat_position_ids, + head_mask, + flat_inputs_embeds, + output_attentions, + output_hidden_states, + return_dict=return_dict, + training=training, + ) + logits = self.sequence_summary(outputs[0], training=training) + logits = self.classifier(logits) + reshaped_logits = tf.reshape(logits, (-1, num_choices)) + loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits) + + if not return_dict: + output = (reshaped_logits,) + outputs[1:] + + return ((loss,) + output) if loss is not None else output + + return TFMultipleChoiceModelOutput( + loss=loss, + logits=reshaped_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convbert", None) is not None: + with tf.name_scope(self.convbert.name): + self.convbert.build(None) + if getattr(self, "sequence_summary", None) is not None: + with 
tf.name_scope(self.sequence_summary.name): + self.sequence_summary.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier.name): + self.classifier.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings( + """ + ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for + Named-Entity-Recognition (NER) tasks. + """, + CONVBERT_START_DOCSTRING, +) +class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.num_labels = config.num_labels + self.convbert = TFConvBertMainLayer(config, name="convbert") + classifier_dropout = ( + config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob + ) + self.dropout = keras.layers.Dropout(classifier_dropout) + self.classifier = keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFTokenClassifierOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + labels: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFTokenClassifierOutput]: + r""" + labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): + Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
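+
+        A minimal inference sketch (the base checkpoint name is an illustrative assumption; a
+        checkpoint fine-tuned for NER would yield meaningful per-token labels):
+
+        ```python
+        import tensorflow as tf
+        from transformers import AutoTokenizer, TFConvBertForTokenClassification
+
+        tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
+        model = TFConvBertForTokenClassification.from_pretrained("YituTech/conv-bert-base")
+
+        inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
+        logits = model(**inputs).logits                  # (1, seq_len, num_labels)
+        predicted_ids = tf.math.argmax(logits, axis=-1)  # per-token label ids
+        ```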
+ """ + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + sequence_output = self.dropout(sequence_output, training=training) + logits = self.classifier(sequence_output) + loss = None if labels is None else self.hf_compute_loss(labels, logits) + + if not return_dict: + output = (logits,) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFTokenClassifierOutput( + loss=loss, + logits=logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convbert", None) is not None: + with tf.name_scope(self.convbert.name): + self.convbert.build(None) + if getattr(self, "classifier", None) is not None: + with tf.name_scope(self.classifier.name): + self.classifier.build([None, None, self.config.hidden_size]) + + +@add_start_docstrings( + """ + ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear + layer on top of the hidden-states output to compute `span start logits` and `span end logits`). + """, + CONVBERT_START_DOCSTRING, +) +class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss): + def __init__(self, config, *inputs, **kwargs): + super().__init__(config, *inputs, **kwargs) + + self.num_labels = config.num_labels + self.convbert = TFConvBertMainLayer(config, name="convbert") + self.qa_outputs = keras.layers.Dense( + config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" + ) + self.config = config + + @unpack_inputs + @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) + @add_code_sample_docstrings( + checkpoint=_CHECKPOINT_FOR_DOC, + output_type=TFQuestionAnsweringModelOutput, + config_class=_CONFIG_FOR_DOC, + ) + def call( + self, + input_ids: TFModelInputType | None = None, + attention_mask: np.ndarray | tf.Tensor | None = None, + token_type_ids: np.ndarray | tf.Tensor | None = None, + position_ids: np.ndarray | tf.Tensor | None = None, + head_mask: np.ndarray | tf.Tensor | None = None, + inputs_embeds: tf.Tensor | None = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + start_positions: tf.Tensor | None = None, + end_positions: tf.Tensor | None = None, + training: Optional[bool] = False, + ) -> Union[Tuple, TFQuestionAnsweringModelOutput]: + r""" + start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the start of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. + end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*): + Labels for position (index) of the end of the labelled span for computing the token classification loss. + Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence + are not taken into account for computing the loss. 
+ """ + outputs = self.convbert( + input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + training=training, + ) + sequence_output = outputs[0] + logits = self.qa_outputs(sequence_output) + start_logits, end_logits = tf.split(logits, 2, axis=-1) + start_logits = tf.squeeze(start_logits, axis=-1) + end_logits = tf.squeeze(end_logits, axis=-1) + loss = None + + if start_positions is not None and end_positions is not None: + labels = {"start_position": start_positions} + labels["end_position"] = end_positions + loss = self.hf_compute_loss(labels, (start_logits, end_logits)) + + if not return_dict: + output = (start_logits, end_logits) + outputs[1:] + return ((loss,) + output) if loss is not None else output + + return TFQuestionAnsweringModelOutput( + loss=loss, + start_logits=start_logits, + end_logits=end_logits, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + ) + + def build(self, input_shape=None): + if self.built: + return + self.built = True + if getattr(self, "convbert", None) is not None: + with tf.name_scope(self.convbert.name): + self.convbert.build(None) + if getattr(self, "qa_outputs", None) is not None: + with tf.name_scope(self.qa_outputs.name): + self.qa_outputs.build([None, None, self.config.hidden_size]) diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py new file mode 100644 index 0000000000000000000000000000000000000000..c0fe2c018341c55b2446a6f12052a3fa36bd9246 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert.py @@ -0,0 +1,503 @@ +# coding=utf-8 +# Copyright 2018 The HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tokenization classes for ConvBERT.""" +import collections +import os +import unicodedata +from typing import List, Optional, Tuple + +from ...tokenization_utils import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace +from ...utils import logging + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} + + +# Copied from transformers.models.bert.tokenization_bert.load_vocab +def load_vocab(vocab_file): + """Loads a vocabulary file into a dictionary.""" + vocab = collections.OrderedDict() + with open(vocab_file, "r", encoding="utf-8") as reader: + tokens = reader.readlines() + for index, token in enumerate(tokens): + token = token.rstrip("\n") + vocab[token] = index + return vocab + + +# Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize +def whitespace_tokenize(text): + """Runs basic whitespace cleaning and splitting on a piece of text.""" + text = text.strip() + if not text: + return [] + tokens = text.split() + return tokens + + +# Copied from transformers.models.bert.tokenization_bert.BertTokenizer with bert-base-cased->YituTech/conv-bert-base, ConvBertTokenizer->BertTokenizer, BERT->ConvBERT +class ConvBertTokenizer(PreTrainedTokenizer): + r""" + Construct a ConvBERT tokenizer. Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to + this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + do_basic_tokenize (`bool`, *optional*, defaults to `True`): + Whether or not to do basic tokenization before WordPiece. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. Only has an effect when + `do_basic_tokenize=True` + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original ConvBERT). 
+ """ + + vocab_files_names = VOCAB_FILES_NAMES + + def __init__( + self, + vocab_file, + do_lower_case=True, + do_basic_tokenize=True, + never_split=None, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs, + ): + if not os.path.isfile(vocab_file): + raise ValueError( + f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained" + " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" + ) + self.vocab = load_vocab(vocab_file) + self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()]) + self.do_basic_tokenize = do_basic_tokenize + if do_basic_tokenize: + self.basic_tokenizer = BasicTokenizer( + do_lower_case=do_lower_case, + never_split=never_split, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + ) + + self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token)) + + super().__init__( + do_lower_case=do_lower_case, + do_basic_tokenize=do_basic_tokenize, + never_split=never_split, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + @property + def do_lower_case(self): + return self.basic_tokenizer.do_lower_case + + @property + def vocab_size(self): + return len(self.vocab) + + def get_vocab(self): + return dict(self.vocab, **self.added_tokens_encoder) + + def _tokenize(self, text, split_special_tokens=False): + split_tokens = [] + if self.do_basic_tokenize: + for token in self.basic_tokenizer.tokenize( + text, never_split=self.all_special_tokens if not split_special_tokens else None + ): + # If the token is part of the never_split set + if token in self.basic_tokenizer.never_split: + split_tokens.append(token) + else: + split_tokens += self.wordpiece_tokenizer.tokenize(token) + else: + split_tokens = self.wordpiece_tokenizer.tokenize(text) + return split_tokens + + def _convert_token_to_id(self, token): + """Converts a token (str) in an id using the vocab.""" + return self.vocab.get(token, self.vocab.get(self.unk_token)) + + def _convert_id_to_token(self, index): + """Converts an index (integer) in a token (str) using the vocab.""" + return self.ids_to_tokens.get(index, self.unk_token) + + def convert_tokens_to_string(self, tokens): + """Converts a sequence of tokens (string) in a single string.""" + out_string = " ".join(tokens).replace(" ##", "").strip() + return out_string + + def build_inputs_with_special_tokens( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and + adding special tokens. A ConvBERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
+ """ + if token_ids_1 is None: + return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + cls = [self.cls_token_id] + sep = [self.sep_token_id] + return cls + token_ids_0 + sep + token_ids_1 + sep + + def get_special_tokens_mask( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False + ) -> List[int]: + """ + Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding + special tokens using the tokenizer `prepare_for_model` method. + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + already_has_special_tokens (`bool`, *optional*, defaults to `False`): + Whether or not the token list is already formatted with special tokens for the model. + + Returns: + `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. + """ + + if already_has_special_tokens: + return super().get_special_tokens_mask( + token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True + ) + + if token_ids_1 is not None: + return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] + return [1] + ([0] * len(token_ids_0)) + [1] + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + index = 0 + if os.path.isdir(save_directory): + vocab_file = os.path.join( + save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] + ) + else: + vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory + with open(vocab_file, "w", encoding="utf-8") as writer: + for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]): + if index != token_index: + logger.warning( + f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive." + " Please check that the vocabulary is not corrupted!" + ) + index = token_index + writer.write(token + "\n") + index += 1 + return (vocab_file,) + + +# Copied from transformers.models.bert.tokenization_bert.BasicTokenizer +class BasicTokenizer(object): + """ + Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.). + + Args: + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + never_split (`Iterable`, *optional*): + Collection of tokens which will never be split during tokenization. 
Only has an effect when + `do_basic_tokenize=True` + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. + + This should likely be deactivated for Japanese (see this + [issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original BERT). + do_split_on_punc (`bool`, *optional*, defaults to `True`): + In some instances we want to skip the basic punctuation splitting so that later tokenization can capture + the full context of the words, such as contractions. + """ + + def __init__( + self, + do_lower_case=True, + never_split=None, + tokenize_chinese_chars=True, + strip_accents=None, + do_split_on_punc=True, + ): + if never_split is None: + never_split = [] + self.do_lower_case = do_lower_case + self.never_split = set(never_split) + self.tokenize_chinese_chars = tokenize_chinese_chars + self.strip_accents = strip_accents + self.do_split_on_punc = do_split_on_punc + + def tokenize(self, text, never_split=None): + """ + Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer. + + Args: + never_split (`List[str]`, *optional*) + Kept for backward compatibility purposes. Now implemented directly at the base class level (see + [`PreTrainedTokenizer.tokenize`]) List of token not to split. + """ + # union() returns a new set by concatenating the two sets. + never_split = self.never_split.union(set(never_split)) if never_split else self.never_split + text = self._clean_text(text) + + # This was added on November 1st, 2018 for the multilingual and Chinese + # models. This is also applied to the English models now, but it doesn't + # matter since the English models were not trained on any Chinese data + # and generally don't have any Chinese data in them (there are Chinese + # characters in the vocabulary because Wikipedia does have some Chinese + # words in the English Wikipedia.). 
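+        # _tokenize_chinese_chars (defined below) just puts spaces around every CJK codepoint, so the
+        # whitespace split that follows treats each Chinese character as its own token.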
+ if self.tokenize_chinese_chars: + text = self._tokenize_chinese_chars(text) + # prevents treating the same character with different unicode codepoints as different characters + unicode_normalized_text = unicodedata.normalize("NFC", text) + orig_tokens = whitespace_tokenize(unicode_normalized_text) + split_tokens = [] + for token in orig_tokens: + if token not in never_split: + if self.do_lower_case: + token = token.lower() + if self.strip_accents is not False: + token = self._run_strip_accents(token) + elif self.strip_accents: + token = self._run_strip_accents(token) + split_tokens.extend(self._run_split_on_punc(token, never_split)) + + output_tokens = whitespace_tokenize(" ".join(split_tokens)) + return output_tokens + + def _run_strip_accents(self, text): + """Strips accents from a piece of text.""" + text = unicodedata.normalize("NFD", text) + output = [] + for char in text: + cat = unicodedata.category(char) + if cat == "Mn": + continue + output.append(char) + return "".join(output) + + def _run_split_on_punc(self, text, never_split=None): + """Splits punctuation on a piece of text.""" + if not self.do_split_on_punc or (never_split is not None and text in never_split): + return [text] + chars = list(text) + i = 0 + start_new_word = True + output = [] + while i < len(chars): + char = chars[i] + if _is_punctuation(char): + output.append([char]) + start_new_word = True + else: + if start_new_word: + output.append([]) + start_new_word = False + output[-1].append(char) + i += 1 + + return ["".join(x) for x in output] + + def _tokenize_chinese_chars(self, text): + """Adds whitespace around any CJK character.""" + output = [] + for char in text: + cp = ord(char) + if self._is_chinese_char(cp): + output.append(" ") + output.append(char) + output.append(" ") + else: + output.append(char) + return "".join(output) + + def _is_chinese_char(self, cp): + """Checks whether CP is the codepoint of a CJK character.""" + # This defines a "chinese character" as anything in the CJK Unicode block: + # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) + # + # Note that the CJK Unicode block is NOT all Japanese and Korean characters, + # despite its name. The modern Korean Hangul alphabet is a different block, + # as is Japanese Hiragana and Katakana. Those alphabets are used to write + # space-separated words, so they are not treated specially and handled + # like the all of the other languages. + if ( + (cp >= 0x4E00 and cp <= 0x9FFF) + or (cp >= 0x3400 and cp <= 0x4DBF) # + or (cp >= 0x20000 and cp <= 0x2A6DF) # + or (cp >= 0x2A700 and cp <= 0x2B73F) # + or (cp >= 0x2B740 and cp <= 0x2B81F) # + or (cp >= 0x2B820 and cp <= 0x2CEAF) # + or (cp >= 0xF900 and cp <= 0xFAFF) + or (cp >= 0x2F800 and cp <= 0x2FA1F) # + ): # + return True + + return False + + def _clean_text(self, text): + """Performs invalid character removal and whitespace cleanup on text.""" + output = [] + for char in text: + cp = ord(char) + if cp == 0 or cp == 0xFFFD or _is_control(char): + continue + if _is_whitespace(char): + output.append(" ") + else: + output.append(char) + return "".join(output) + + +# Copied from transformers.models.bert.tokenization_bert.WordpieceTokenizer +class WordpieceTokenizer(object): + """Runs WordPiece tokenization.""" + + def __init__(self, vocab, unk_token, max_input_chars_per_word=100): + self.vocab = vocab + self.unk_token = unk_token + self.max_input_chars_per_word = max_input_chars_per_word + + def tokenize(self, text): + """ + Tokenizes a piece of text into its word pieces. 
This uses a greedy longest-match-first algorithm to perform + tokenization using the given vocabulary. + + For example, `input = "unaffable"` will return as output `["un", "##aff", "##able"]`. + + Args: + text: A single token or whitespace separated tokens. This should have + already been passed through *BasicTokenizer*. + + Returns: + A list of wordpiece tokens. + """ + + output_tokens = [] + for token in whitespace_tokenize(text): + chars = list(token) + if len(chars) > self.max_input_chars_per_word: + output_tokens.append(self.unk_token) + continue + + is_bad = False + start = 0 + sub_tokens = [] + while start < len(chars): + end = len(chars) + cur_substr = None + while start < end: + substr = "".join(chars[start:end]) + if start > 0: + substr = "##" + substr + if substr in self.vocab: + cur_substr = substr + break + end -= 1 + if cur_substr is None: + is_bad = True + break + sub_tokens.append(cur_substr) + start = end + + if is_bad: + output_tokens.append(self.unk_token) + else: + output_tokens.extend(sub_tokens) + return output_tokens diff --git a/venv/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py b/venv/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py new file mode 100644 index 0000000000000000000000000000000000000000..65bedb73fe9171f0473c7d5a35c08dee7432eb78 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/convbert/tokenization_convbert_fast.py @@ -0,0 +1,172 @@ +# coding=utf-8 +# Copyright The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tokenization classes for ConvBERT.""" +import json +from typing import List, Optional, Tuple + +from tokenizers import normalizers + +from ...tokenization_utils_fast import PreTrainedTokenizerFast +from ...utils import logging +from .tokenization_convbert import ConvBertTokenizer + + +logger = logging.get_logger(__name__) + +VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"} + + +# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->YituTech/conv-bert-base, Bert->ConvBert, BERT->ConvBERT +class ConvBertTokenizerFast(PreTrainedTokenizerFast): + r""" + Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece. + + This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should + refer to this superclass for more information regarding those methods. + + Args: + vocab_file (`str`): + File containing the vocabulary. + do_lower_case (`bool`, *optional*, defaults to `True`): + Whether or not to lowercase the input when tokenizing. + unk_token (`str`, *optional*, defaults to `"[UNK]"`): + The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this + token instead. + sep_token (`str`, *optional*, defaults to `"[SEP]"`): + The separator token, which is used when building a sequence from multiple sequences, e.g.
two sequences for + sequence classification or for a text and a question for question answering. It is also used as the last + token of a sequence built with special tokens. + pad_token (`str`, *optional*, defaults to `"[PAD]"`): + The token used for padding, for example when batching sequences of different lengths. + cls_token (`str`, *optional*, defaults to `"[CLS]"`): + The classifier token which is used when doing sequence classification (classification of the whole sequence + instead of per-token classification). It is the first token of the sequence when built with special tokens. + mask_token (`str`, *optional*, defaults to `"[MASK]"`): + The token used for masking values. This is the token used when training this model with masked language + modeling. This is the token which the model will try to predict. + clean_text (`bool`, *optional*, defaults to `True`): + Whether or not to clean the text before tokenization by removing any control characters and replacing all + whitespaces by the classic one. + tokenize_chinese_chars (`bool`, *optional*, defaults to `True`): + Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this + issue](https://github.com/huggingface/transformers/issues/328)). + strip_accents (`bool`, *optional*): + Whether or not to strip all accents. If this option is not specified, then it will be determined by the + value for `lowercase` (as in the original ConvBERT). + wordpieces_prefix (`str`, *optional*, defaults to `"##"`): + The prefix for subwords. + """ + + vocab_files_names = VOCAB_FILES_NAMES + slow_tokenizer_class = ConvBertTokenizer + + def __init__( + self, + vocab_file=None, + tokenizer_file=None, + do_lower_case=True, + unk_token="[UNK]", + sep_token="[SEP]", + pad_token="[PAD]", + cls_token="[CLS]", + mask_token="[MASK]", + tokenize_chinese_chars=True, + strip_accents=None, + **kwargs, + ): + super().__init__( + vocab_file, + tokenizer_file=tokenizer_file, + do_lower_case=do_lower_case, + unk_token=unk_token, + sep_token=sep_token, + pad_token=pad_token, + cls_token=cls_token, + mask_token=mask_token, + tokenize_chinese_chars=tokenize_chinese_chars, + strip_accents=strip_accents, + **kwargs, + ) + + normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__()) + if ( + normalizer_state.get("lowercase", do_lower_case) != do_lower_case + or normalizer_state.get("strip_accents", strip_accents) != strip_accents + or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars + ): + normalizer_class = getattr(normalizers, normalizer_state.pop("type")) + normalizer_state["lowercase"] = do_lower_case + normalizer_state["strip_accents"] = strip_accents + normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars + self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state) + + self.do_lower_case = do_lower_case + + def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): + """ + Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and + adding special tokens. A ConvBERT sequence has the following format: + + - single sequence: `[CLS] X [SEP]` + - pair of sequences: `[CLS] A [SEP] B [SEP]` + + Args: + token_ids_0 (`List[int]`): + List of IDs to which the special tokens will be added. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs.
+ + Returns: + `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. + """ + output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id] + + if token_ids_1 is not None: + output += token_ids_1 + [self.sep_token_id] + + return output + + def create_token_type_ids_from_sequences( + self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None + ) -> List[int]: + """ + Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT sequence + pair mask has the following format: + + ``` + 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 + | first sequence | second sequence | + ``` + + If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). + + Args: + token_ids_0 (`List[int]`): + List of IDs. + token_ids_1 (`List[int]`, *optional*): + Optional second list of IDs for sequence pairs. + + Returns: + `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). + """ + sep = [self.sep_token_id] + cls = [self.cls_token_id] + if token_ids_1 is None: + return len(cls + token_ids_0 + sep) * [0] + return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + + def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: + files = self._tokenizer.model.save(save_directory, name=filename_prefix) + return tuple(files) diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee7744dbce3169e2d60b86d918941b72cbdda9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/configuration_vit_mae.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/configuration_vit_mae.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2075126968dbb91572ba789ada21c6158026246 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/configuration_vit_mae.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/convert_vit_mae_to_pytorch.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/convert_vit_mae_to_pytorch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6851df032687252b87e8a9adbbbbf4403a534115 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/convert_vit_mae_to_pytorch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/modeling_tf_vit_mae.cpython-310.pyc b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/modeling_tf_vit_mae.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07f80d4d6e2ae02cbdf4cf50a41dc3492aa8d8c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/modeling_tf_vit_mae.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/modeling_vit_mae.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/modeling_vit_mae.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..170c164778e20db3128a1f702a653343901e9fc4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/__pycache__/modeling_vit_mae.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/transformers/models/vit_mae/configuration_vit_mae.py b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/configuration_vit_mae.py new file mode 100644 index 0000000000000000000000000000000000000000..c5866ef40b497c509734b668f4f044e8b93b1883 --- /dev/null +++ b/venv/lib/python3.10/site-packages/transformers/models/vit_mae/configuration_vit_mae.py @@ -0,0 +1,140 @@ +# coding=utf-8 +# Copyright 2022 Facebook AI and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" ViT MAE model configuration""" + +from ...configuration_utils import PretrainedConfig +from ...utils import logging + + +logger = logging.get_logger(__name__) + + +from ..deprecated._archive_maps import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402 + + +class ViTMAEConfig(PretrainedConfig): + r""" + This is the configuration class to store the configuration of a [`ViTMAEModel`]. It is used to instantiate a ViT + MAE model according to the specified arguments, defining the model architecture. Instantiating a configuration with + the defaults will yield a similar configuration to that of the ViT + [facebook/vit-mae-base](https://huggingface.co/facebook/vit-mae-base) architecture. + + Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the + documentation from [`PretrainedConfig`] for more information. + + + Args: + hidden_size (`int`, *optional*, defaults to 768): + Dimensionality of the encoder layers and the pooler layer. + num_hidden_layers (`int`, *optional*, defaults to 12): + Number of hidden layers in the Transformer encoder. + num_attention_heads (`int`, *optional*, defaults to 12): + Number of attention heads for each attention layer in the Transformer encoder. + intermediate_size (`int`, *optional*, defaults to 3072): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. + hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): + The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, + `"relu"`, `"selu"` and `"gelu_new"` are supported. + hidden_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. + attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): + The dropout ratio for the attention probabilities. + initializer_range (`float`, *optional*, defaults to 0.02): + The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12): + The epsilon used by the layer normalization layers. + image_size (`int`, *optional*, defaults to 224): + The size (resolution) of each image. + patch_size (`int`, *optional*, defaults to 16): + The size (resolution) of each patch. + num_channels (`int`, *optional*, defaults to 3): + The number of input channels. + qkv_bias (`bool`, *optional*, defaults to `True`): + Whether to add a bias to the queries, keys and values. + decoder_num_attention_heads (`int`, *optional*, defaults to 16): + Number of attention heads for each attention layer in the decoder. + decoder_hidden_size (`int`, *optional*, defaults to 512): + Dimensionality of the decoder. + decoder_num_hidden_layers (`int`, *optional*, defaults to 8): + Number of hidden layers in the decoder. + decoder_intermediate_size (`int`, *optional*, defaults to 2048): + Dimensionality of the "intermediate" (i.e., feed-forward) layer in the decoder. + mask_ratio (`float`, *optional*, defaults to 0.75): + The ratio of the number of masked tokens in the input sequence. + norm_pix_loss (`bool`, *optional*, defaults to `False`): + Whether or not to train with normalized pixels (see Table 3 in the paper). Using normalized pixels improved + representation quality in the experiments of the authors. + + Example: + + ```python + >>> from transformers import ViTMAEConfig, ViTMAEModel + + >>> # Initializing a ViT MAE vit-mae-base style configuration + >>> configuration = ViTMAEConfig() + + >>> # Initializing a model (with random weights) from the vit-mae-base style configuration + >>> model = ViTMAEModel(configuration) + + >>> # Accessing the model configuration + >>> configuration = model.config + ```""" + + model_type = "vit_mae" + + def __init__( + self, + hidden_size=768, + num_hidden_layers=12, + num_attention_heads=12, + intermediate_size=3072, + hidden_act="gelu", + hidden_dropout_prob=0.0, + attention_probs_dropout_prob=0.0, + initializer_range=0.02, + layer_norm_eps=1e-12, + image_size=224, + patch_size=16, + num_channels=3, + qkv_bias=True, + decoder_num_attention_heads=16, + decoder_hidden_size=512, + decoder_num_hidden_layers=8, + decoder_intermediate_size=2048, + mask_ratio=0.75, + norm_pix_loss=False, + **kwargs, + ): + super().__init__(**kwargs) + + self.hidden_size = hidden_size + self.num_hidden_layers = num_hidden_layers + self.num_attention_heads = num_attention_heads + self.intermediate_size = intermediate_size + self.hidden_act = hidden_act + self.hidden_dropout_prob = hidden_dropout_prob + self.attention_probs_dropout_prob = attention_probs_dropout_prob + self.initializer_range = initializer_range + self.layer_norm_eps = layer_norm_eps + self.image_size = image_size + self.patch_size = patch_size + self.num_channels = num_channels + self.qkv_bias = qkv_bias + self.decoder_num_attention_heads = decoder_num_attention_heads + self.decoder_hidden_size = decoder_hidden_size + self.decoder_num_hidden_layers = decoder_num_hidden_layers + self.decoder_intermediate_size = decoder_intermediate_size + self.mask_ratio = mask_ratio + self.norm_pix_loss = norm_pix_loss
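Editor's note: a short worked example of how the `mask_ratio`, `image_size`, and `patch_size` defaults in the `ViTMAEConfig` above interact may help. The arithmetic below is illustrative only; it uses the standard non-overlapping square-patch count for a ViT and the default values shown, and the variable names are mine, not part of the library.

```python
# Illustrative arithmetic based on the ViTMAEConfig defaults above.
image_size, patch_size, mask_ratio = 224, 16, 0.75

num_patches = (image_size // patch_size) ** 2   # (224 // 16) ** 2 = 196 patch tokens
num_masked = int(mask_ratio * num_patches)      # ~147 patches hidden during pre-training
num_visible = num_patches - num_masked          # ~49 patches the encoder actually sees

print(num_patches, num_masked, num_visible)     # 196 147 49
```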
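Earlier in this diff, `WordpieceTokenizer.tokenize` (in the ConvBERT slow tokenizer) documents a greedy longest-match-first loop. The sketch below mirrors that loop as a standalone function so the `"unaffable"` example from its docstring can be traced end to end; the function name and the toy vocabulary are hypothetical and are not part of the transformers API.

```python
# Standalone sketch of the greedy longest-match-first WordPiece loop shown above.
# The toy vocabulary is illustrative; a real ConvBERT vocab.txt holds ~30k entries.
def wordpiece(token, vocab, unk_token="[UNK]"):
    chars = list(token)
    sub_tokens = []
    start = 0
    while start < len(chars):
        end = len(chars)
        cur_substr = None
        while start < end:
            substr = "".join(chars[start:end])
            if start > 0:
                substr = "##" + substr  # continuation pieces carry the "##" prefix
            if substr in vocab:
                cur_substr = substr
                break
            end -= 1  # shrink the window until some prefix matches the vocab
        if cur_substr is None:
            return [unk_token]  # no prefix of the remainder is in the vocab
        sub_tokens.append(cur_substr)
        start = end
    return sub_tokens


toy_vocab = {"un", "##aff", "##able", "[UNK]"}
print(wordpiece("unaffable", toy_vocab))  # ['un', '##aff', '##able']
```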
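Likewise, `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` in `ConvBertTokenizerFast` above are easiest to read next to a concrete sequence pair. The integer IDs below are made up purely for illustration; a real tokenizer would resolve `[CLS]` and `[SEP]` from its vocabulary file.

```python
# Illustrative layout of a ConvBERT sequence pair: [CLS] A [SEP] B [SEP]
CLS, SEP = 101, 102          # hypothetical [CLS] / [SEP] IDs
token_ids_0 = [7, 8, 9]      # hypothetical IDs for sentence A
token_ids_1 = [10, 11]       # hypothetical IDs for sentence B

# build_inputs_with_special_tokens: cls + A + sep, then B + sep if present
input_ids = [CLS] + token_ids_0 + [SEP] + token_ids_1 + [SEP]

# create_token_type_ids_from_sequences: 0s over [CLS] A [SEP], 1s over B [SEP]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

print(input_ids)       # [101, 7, 8, 9, 102, 10, 11, 102]
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]
```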