applied-ai-018 committed on
Commit 8b9d80c · verified · 1 Parent(s): 50b2833

Add files using upload-large-folder tool
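The commit message above names the upload-large-folder tool. For context, a minimal sketch of how such a commit is typically produced with the huggingface_hub Python client — assuming huggingface_hub >= 0.24, which provides HfApi.upload_large_folder; the repo id and local folder path below are hypothetical placeholders, not values taken from this commit:

# Minimal sketch, not the exact command used for this commit.
# Assumes huggingface_hub >= 0.24; repo id and folder path are hypothetical.
from huggingface_hub import HfApi

api = HfApi()  # picks up the token cached by `huggingface-cli login`
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # hypothetical target repository
    folder_path="./lm-evaluation",          # hypothetical local folder to upload
    repo_type="dataset",                    # upload_large_folder requires an explicit repo type
)

upload_large_folder splits a large local folder into batches and pushes them as a series of commits (with a default message like the one above), which is why a big drop can arrive as many same-named commits.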

This view is limited to 50 files because the commit contains too many changes. See the raw diff for the complete set of changes.
Files changed (50)
  1. lm-evaluation/tests/testdata/anagrams2-v0-greedy_until +1 -0
  2. lm-evaluation/tests/testdata/anagrams2-v0-res.json +1 -0
  3. lm-evaluation/tests/testdata/arithmetic_1dc-v0-loglikelihood +1 -0
  4. lm-evaluation/tests/testdata/arithmetic_2ds-v0-loglikelihood +1 -0
  5. lm-evaluation/tests/testdata/arithmetic_5ds-v0-loglikelihood +1 -0
  6. lm-evaluation/tests/testdata/blimp_adjunct_island-v0-res.json +1 -0
  7. lm-evaluation/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json +1 -0
  8. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-loglikelihood +1 -0
  9. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-loglikelihood +1 -0
  10. lm-evaluation/tests/testdata/blimp_existential_there_object_raising-v0-loglikelihood +1 -0
  11. lm-evaluation/tests/testdata/blimp_expletive_it_object_raising-v0-loglikelihood +1 -0
  12. lm-evaluation/tests/testdata/blimp_left_branch_island_echo_question-v0-res.json +1 -0
  13. lm-evaluation/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood +1 -0
  14. lm-evaluation/tests/testdata/blimp_npi_present_2-v0-loglikelihood +1 -0
  15. lm-evaluation/tests/testdata/blimp_only_npi_licensor_present-v0-res.json +1 -0
  16. lm-evaluation/tests/testdata/blimp_only_npi_scope-v0-res.json +1 -0
  17. lm-evaluation/tests/testdata/blimp_principle_A_case_1-v0-loglikelihood +1 -0
  18. lm-evaluation/tests/testdata/blimp_principle_A_domain_1-v0-loglikelihood +1 -0
  19. lm-evaluation/tests/testdata/blimp_principle_A_domain_2-v0-res.json +1 -0
  20. lm-evaluation/tests/testdata/blimp_principle_A_domain_3-v0-res.json +1 -0
  21. lm-evaluation/tests/testdata/blimp_principle_A_reconstruction-v0-res.json +1 -0
  22. lm-evaluation/tests/testdata/blimp_sentential_negation_npi_scope-v0-loglikelihood +1 -0
  23. lm-evaluation/tests/testdata/blimp_sentential_negation_npi_scope-v0-res.json +1 -0
  24. lm-evaluation/tests/testdata/blimp_superlative_quantifiers_2-v0-loglikelihood +1 -0
  25. lm-evaluation/tests/testdata/blimp_tough_vs_raising_1-v0-res.json +1 -0
  26. lm-evaluation/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-loglikelihood +1 -0
  27. lm-evaluation/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-res.json +1 -0
  28. lm-evaluation/tests/testdata/cola-v0-res.json +1 -0
  29. lm-evaluation/tests/testdata/coqa-v1-greedy_until +1 -0
  30. lm-evaluation/tests/testdata/crows_pairs_english-v0-res.json +1 -0
  31. lm-evaluation/tests/testdata/crows_pairs_english_disability-v0-res.json +1 -0
  32. lm-evaluation/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood +1 -0
  33. lm-evaluation/tests/testdata/crows_pairs_french_gender-v0-res.json +1 -0
  34. lm-evaluation/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood +1 -0
  35. lm-evaluation/tests/testdata/cycle_letters-v0-res.json +1 -0
  36. lm-evaluation/tests/testdata/ethics_utilitarianism_original-v0-loglikelihood +1 -0
  37. lm-evaluation/tests/testdata/hendrycksTest-business_ethics-v0-res.json +1 -0
  38. lm-evaluation/tests/testdata/hendrycksTest-college_medicine-v0-loglikelihood +1 -0
  39. lm-evaluation/tests/testdata/hendrycksTest-conceptual_physics-v0-loglikelihood +1 -0
  40. lm-evaluation/tests/testdata/hendrycksTest-high_school_computer_science-v0-res.json +1 -0
  41. lm-evaluation/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-res.json +1 -0
  42. lm-evaluation/tests/testdata/hendrycksTest-human_sexuality-v0-res.json +1 -0
  43. lm-evaluation/tests/testdata/hendrycksTest-machine_learning-v0-res.json +1 -0
  44. lm-evaluation/tests/testdata/hendrycksTest-professional_law-v0-loglikelihood +1 -0
  45. lm-evaluation/tests/testdata/hendrycksTest-professional_psychology-v0-res.json +1 -0
  46. lm-evaluation/tests/testdata/hendrycksTest-security_studies-v0-res.json +1 -0
  47. lm-evaluation/tests/testdata/lambada-v0-loglikelihood +1 -0
  48. lm-evaluation/tests/testdata/lambada_openai_mt_fr-v0-res.json +1 -0
  49. lm-evaluation/tests/testdata/math_geometry-v1-greedy_until +1 -0
  50. lm-evaluation/tests/testdata/mrpc-v0-res.json +1 -0
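Two fixture formats appear in this batch: the *-loglikelihood and *-greedy_until files each hold a single 64-character hex string (consistent with a SHA-256 digest of serialized requests), while the *-res.json files hold expected per-task metrics. A minimal sketch of producing a digest in that format — the payload is a hypothetical stand-in for whatever the test harness actually serializes:

# Minimal sketch: only the hashing step (SHA-256, lowercase hex)
# matches the fixture format; the payload below is hypothetical.
import hashlib

payload = b"serialized evaluation requests"  # hypothetical input
print(hashlib.sha256(payload).hexdigest())   # 64 lowercase hex chars, as in the fixtures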
lm-evaluation/tests/testdata/anagrams2-v0-greedy_until ADDED
@@ -0,0 +1 @@
+6700a3c44e48abe8337238dcbe3b54cf4abafe0c204c52d921e590872fbd05e7
lm-evaluation/tests/testdata/anagrams2-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"anagrams2": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"anagrams2": 0}}
lm-evaluation/tests/testdata/arithmetic_1dc-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+04c3a63a6b3c579bd3775d92b3076ba9130041d5ce7cf9244d3f86e95c804387
lm-evaluation/tests/testdata/arithmetic_2ds-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+66f7ff3b40251ee38fadcbee658e309a200224356fc3efa07d0a490a2c24bfa3
lm-evaluation/tests/testdata/arithmetic_5ds-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+2888d6d098a5ef8c1e7f0d8295ba80826e2e04e431f57508dfb71d53e1cd4604
lm-evaluation/tests/testdata/blimp_adjunct_island-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_adjunct_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_adjunct_island": 0}}
lm-evaluation/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_anaphor_gender_agreement": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_anaphor_gender_agreement": 0}}
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_irregular_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ddb24ddfaebe076b3aa7107937d71bf5f4503a78283bc889e39200368603681e
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ad61c619aa79433d02f1aeacde2ab87291fd5d5c370032c24d41c4f0065ed1f9
lm-evaluation/tests/testdata/blimp_existential_there_object_raising-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+63567712076256f373131971676c1c6d711efef73cd0e4de3cc639bc631a2413
lm-evaluation/tests/testdata/blimp_expletive_it_object_raising-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ceede5b38248a62125a74a8332602b8eac5ef40864f071ad8d86e7971e07219d
lm-evaluation/tests/testdata/blimp_left_branch_island_echo_question-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_left_branch_island_echo_question": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_left_branch_island_echo_question": 0}}
lm-evaluation/tests/testdata/blimp_left_branch_island_simple_question-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+6cb36bbdae7754f8832f50872c3dd511ce12547e00fa0771deb747be3355eb85
lm-evaluation/tests/testdata/blimp_npi_present_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+fdb688ac6259bb65d234ef0a36e9a9ee449f9608f633b12e1943b462aead8e17
lm-evaluation/tests/testdata/blimp_only_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_only_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_licensor_present": 0}}
lm-evaluation/tests/testdata/blimp_only_npi_scope-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_only_npi_scope": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_scope": 0}}
lm-evaluation/tests/testdata/blimp_principle_A_case_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+49d2b8ce6667a6166fdc2a2e5dbe7ff07d9b8415e9f33482aef15956b3ebc24a
lm-evaluation/tests/testdata/blimp_principle_A_domain_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+290e7eddacea4ec16989af697f2ee3373fdd9aef4b452bf887184c6e2f6e7d9d
lm-evaluation/tests/testdata/blimp_principle_A_domain_2-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_principle_A_domain_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_2": 0}}
lm-evaluation/tests/testdata/blimp_principle_A_domain_3-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_principle_A_domain_3": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_3": 0}}
lm-evaluation/tests/testdata/blimp_principle_A_reconstruction-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_principle_A_reconstruction": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_reconstruction": 0}}
lm-evaluation/tests/testdata/blimp_sentential_negation_npi_scope-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+32fcbd0a1c6e664af2751bad552587b5ca3911973b07f4fb2cf0a2acd3de5349
lm-evaluation/tests/testdata/blimp_sentential_negation_npi_scope-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_sentential_negation_npi_scope": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_negation_npi_scope": 0}}
lm-evaluation/tests/testdata/blimp_superlative_quantifiers_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+59c20ff0f632cf42afc74ecc682cf92e5e740417b01e6cf9a610a3bc544d2ea5
lm-evaluation/tests/testdata/blimp_tough_vs_raising_1-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_tough_vs_raising_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_tough_vs_raising_1": 0}}
lm-evaluation/tests/testdata/blimp_wh_questions_subject_gap_long_distance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+37483dfda688b62ad27161c9fc1e1e7710c5a6e6a7cd3474df119bcafd30e97f
lm-evaluation/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_wh_vs_that_with_gap_long_distance": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_with_gap_long_distance": 0}}
lm-evaluation/tests/testdata/cola-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"cola": {"mcc": -0.04538802810223175, "mcc_stderr": 0.023100371589225246}}, "versions": {"cola": 0}}
lm-evaluation/tests/testdata/coqa-v1-greedy_until ADDED
@@ -0,0 +1 @@
+57581470b921435d40da97872bb1cfda6ecf963ccc4b0240a3b04e3fea8c8e3a
lm-evaluation/tests/testdata/crows_pairs_english-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english": {"likelihood_difference": 0.3367363060632734, "likelihood_difference_stderr": 0.005827747024053628, "pct_stereotype": 0.5062611806797853, "pct_stereotype_stderr": 0.012212341600228745}}, "versions": {"crows_pairs_english": 0}}
lm-evaluation/tests/testdata/crows_pairs_english_disability-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_disability": {"likelihood_difference": 0.3148684792547637, "likelihood_difference_stderr": 0.02800803147051987, "pct_stereotype": 0.36923076923076925, "pct_stereotype_stderr": 0.06032456592830047}}, "versions": {"crows_pairs_english_disability": 0}}
lm-evaluation/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+b85bc849811ccfa9971a6ee3fca7342752c314c0cb6f126e10d9ec4d0450c541
lm-evaluation/tests/testdata/crows_pairs_french_gender-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_french_gender": {"likelihood_difference": 0.3364019171359413, "likelihood_difference_stderr": 0.012815700745990895, "pct_stereotype": 0.4766355140186916, "pct_stereotype_stderr": 0.027920316348204986}}, "versions": {"crows_pairs_french_gender": 0}}
lm-evaluation/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ea61eaad64e9292790d4bbef955ffeebed7a595de098bc5ac726a6e51f27f9af
lm-evaluation/tests/testdata/cycle_letters-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"cycle_letters": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"cycle_letters": 0}}
lm-evaluation/tests/testdata/ethics_utilitarianism_original-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+5b42ba1faf5ece6a6ec9a3976ce79c1fac8df5b98272aab85457188c2142693c
lm-evaluation/tests/testdata/hendrycksTest-business_ethics-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-business_ethics": {"acc": 0.29, "acc_norm": 0.27, "acc_norm_stderr": 0.044619604333847394, "acc_stderr": 0.045604802157206845}}, "versions": {"hendrycksTest-business_ethics": 0}}
lm-evaluation/tests/testdata/hendrycksTest-college_medicine-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+dd6e0a9be1407890e9f8cd4434fb6aa4752ab3d2473837fd465ad99f60ad685e
lm-evaluation/tests/testdata/hendrycksTest-conceptual_physics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+622f191ccfc7a597d99f39897ebe3f95a9ddce0e662fcfb411aa554b289bb355
lm-evaluation/tests/testdata/hendrycksTest-high_school_computer_science-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_computer_science": {"acc": 0.2, "acc_norm": 0.22, "acc_norm_stderr": 0.04163331998932269, "acc_stderr": 0.04020151261036845}}, "versions": {"hendrycksTest-high_school_computer_science": 0}}
lm-evaluation/tests/testdata/hendrycksTest-high_school_government_and_politics-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_government_and_politics": {"acc": 0.24352331606217617, "acc_norm": 0.23834196891191708, "acc_norm_stderr": 0.03074890536390988, "acc_stderr": 0.030975436386845436}}, "versions": {"hendrycksTest-high_school_government_and_politics": 0}}
lm-evaluation/tests/testdata/hendrycksTest-human_sexuality-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-human_sexuality": {"acc": 0.22137404580152673, "acc_norm": 0.22900763358778625, "acc_norm_stderr": 0.036853466317118506, "acc_stderr": 0.0364129708131373}}, "versions": {"hendrycksTest-human_sexuality": 0}}
lm-evaluation/tests/testdata/hendrycksTest-machine_learning-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-machine_learning": {"acc": 0.23214285714285715, "acc_norm": 0.22321428571428573, "acc_norm_stderr": 0.039523019677025116, "acc_stderr": 0.04007341809755806}}, "versions": {"hendrycksTest-machine_learning": 0}}
lm-evaluation/tests/testdata/hendrycksTest-professional_law-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+c38c9d5d84eeb7a5f3c4a34d6e70d7e15847b3c38f26e4b119c982bb935e118f
lm-evaluation/tests/testdata/hendrycksTest-professional_psychology-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-professional_psychology": {"acc": 0.27124183006535946, "acc_norm": 0.2826797385620915, "acc_norm_stderr": 0.01821726955205344, "acc_stderr": 0.01798661530403031}}, "versions": {"hendrycksTest-professional_psychology": 0}}
lm-evaluation/tests/testdata/hendrycksTest-security_studies-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-security_studies": {"acc": 0.2979591836734694, "acc_norm": 0.2693877551020408, "acc_norm_stderr": 0.02840125202902294, "acc_stderr": 0.029279567411065674}}, "versions": {"hendrycksTest-security_studies": 0}}
lm-evaluation/tests/testdata/lambada-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation/tests/testdata/lambada_openai_mt_fr-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"lambada_openai_mt_fr": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_fr": 0}}
lm-evaluation/tests/testdata/math_geometry-v1-greedy_until ADDED
@@ -0,0 +1 @@
+46bc4cb219b6903397da782699a684bdbb982c0c954ff82e6beeed5c84878f42
lm-evaluation/tests/testdata/mrpc-v0-res.json ADDED
@@ -0,0 +1 @@
+{"results": {"mrpc": {"acc": 0.5392156862745098, "acc_stderr": 0.024707732873723128, "f1": 0.5982905982905982, "f1_stderr": 0.028928325246283727}}, "versions": {"mrpc": 0}}