applied-ai-018 committed
Commit ca91c6c · verified · 1 Parent(s): 068e5e3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/tests/testdata/anli_r2-v0-res.json +1 -0
  2. lm-evaluation/tests/testdata/arithmetic_1dc-v0-res.json +1 -0
  3. lm-evaluation/tests/testdata/arithmetic_3ds-v0-res.json +1 -0
  4. lm-evaluation/tests/testdata/blimp_adjunct_island-v0-loglikelihood +1 -0
  5. lm-evaluation/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-res.json +1 -0
  6. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adj_2-v0-loglikelihood +1 -0
  7. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood +1 -0
  8. lm-evaluation/tests/testdata/blimp_inchoative-v0-res.json +1 -0
  9. lm-evaluation/tests/testdata/blimp_intransitive-v0-res.json +1 -0
  10. lm-evaluation/tests/testdata/blimp_irregular_plural_subject_verb_agreement_2-v0-loglikelihood +1 -0
  11. lm-evaluation/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood +1 -0
  12. lm-evaluation/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-loglikelihood +1 -0
  13. lm-evaluation/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood +1 -0
  14. lm-evaluation/tests/testdata/blimp_wh_island-v0-res.json +1 -0
  15. lm-evaluation/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json +1 -0
  16. lm-evaluation/tests/testdata/blimp_wh_vs_that_no_gap-v0-loglikelihood +1 -0
  17. lm-evaluation/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood +1 -0
  18. lm-evaluation/tests/testdata/cb-v0-res.json +1 -0
  19. lm-evaluation/tests/testdata/crows_pairs_english_gender-v0-loglikelihood +1 -0
  20. lm-evaluation/tests/testdata/crows_pairs_french_religion-v0-res.json +1 -0
  21. lm-evaluation/tests/testdata/crows_pairs_french_sexual_orientation-v0-loglikelihood +1 -0
  22. lm-evaluation/tests/testdata/crows_pairs_french_socioeconomic-v0-res.json +1 -0
  23. lm-evaluation/tests/testdata/cycle_letters-v0-greedy_until +1 -0
  24. lm-evaluation/tests/testdata/ethics_deontology-v0-res.json +1 -0
  25. lm-evaluation/tests/testdata/ethics_virtue-v0-loglikelihood +1 -0
  26. lm-evaluation/tests/testdata/ethics_virtue-v0-res.json +1 -0
  27. lm-evaluation/tests/testdata/gsm8k-v0-greedy_until +1 -0
  28. lm-evaluation/tests/testdata/gsm8k-v0-res.json +1 -0
  29. lm-evaluation/tests/testdata/headqa_en-v0-loglikelihood +1 -0
  30. lm-evaluation/tests/testdata/headqa_en-v0-res.json +1 -0
  31. lm-evaluation/tests/testdata/hendrycksTest-college_biology-v0-res.json +1 -0
  32. lm-evaluation/tests/testdata/hendrycksTest-college_computer_science-v0-loglikelihood +1 -0
  33. lm-evaluation/tests/testdata/hendrycksTest-elementary_mathematics-v0-res.json +1 -0
  34. lm-evaluation/tests/testdata/hendrycksTest-global_facts-v0-loglikelihood +1 -0
  35. lm-evaluation/tests/testdata/hendrycksTest-high_school_european_history-v0-loglikelihood +1 -0
  36. lm-evaluation/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood +1 -0
  37. lm-evaluation/tests/testdata/hendrycksTest-management-v0-res.json +1 -0
  38. lm-evaluation/tests/testdata/hendrycksTest-moral_scenarios-v0-res.json +1 -0
  39. lm-evaluation/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood +1 -0
  40. lm-evaluation/tests/testdata/iwslt17-en-ar-v0-greedy_until +1 -0
  41. lm-evaluation/tests/testdata/lambada_cloze-v0-res.json +1 -0
  42. lm-evaluation/tests/testdata/lambada_openai-v2.0-res.json +1 -0
  43. lm-evaluation/tests/testdata/lambada_openai_mt_es-v0-loglikelihood +1 -0
  44. lm-evaluation/tests/testdata/logiqa-v0-res.json +1 -0
  45. lm-evaluation/tests/testdata/math_counting_and_prob-v0-greedy_until +1 -0
  46. lm-evaluation/tests/testdata/math_intermediate_algebra-v0-greedy_until +1 -0
  47. lm-evaluation/tests/testdata/math_intermediate_algebra-v1-greedy_until +1 -0
  48. lm-evaluation/tests/testdata/math_num_theory-v1-greedy_until +1 -0
  49. lm-evaluation/tests/testdata/mathqa-v0-res.json +1 -0
  50. lm-evaluation/tests/testdata/mutual-v1-res.json +1 -0
lm-evaluation/tests/testdata/anli_r2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"anli_r2": {"acc": 0.356, "acc_stderr": 0.015149042659306628}}, "versions": {"anli_r2": 0}}
lm-evaluation/tests/testdata/arithmetic_1dc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_1dc": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_1dc": 0}}
lm-evaluation/tests/testdata/arithmetic_3ds-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_3ds": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_3ds": 0}}
lm-evaluation/tests/testdata/blimp_adjunct_island-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 976a5cac4bdb724632eebd4cb9e522203ce3da8d5525288a597c86e80469f3f2
lm-evaluation/tests/testdata/blimp_coordinate_structure_constraint_complex_left_branch-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_coordinate_structure_constraint_complex_left_branch": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_coordinate_structure_constraint_complex_left_branch": 0}}
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adj_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 95acb74fac7d57ae2c9d208361a5f8ad36b0b19a055f02e648ed8e99505f4b43
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ccc64b4d5e80c081d5161aae5828212ba49d277ca8c5a4281f181744727a6a99
lm-evaluation/tests/testdata/blimp_inchoative-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_inchoative": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_inchoative": 0}}
lm-evaluation/tests/testdata/blimp_intransitive-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_intransitive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_intransitive": 0}}
lm-evaluation/tests/testdata/blimp_irregular_plural_subject_verb_agreement_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 9534751f83a86b6cbe1fb12fb9feb827b0b7836a663108928b4ecc1d70b08871
lm-evaluation/tests/testdata/blimp_left_branch_island_echo_question-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 9852b38612db8c6adf938a5d8a7a9e5ce9e655259d6cc806b142506fcaff0ed4
lm-evaluation/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ e6666c5657215ff4bfd646b8ee3ae6df956e71c0be9ab1c287fb1b68291dd0d1
lm-evaluation/tests/testdata/blimp_superlative_quantifiers_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 8a01f6a5ea87a01c0c9b0c7b3bc4de4711bf0ff050976976651182b9ed34a0d4
lm-evaluation/tests/testdata/blimp_wh_island-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_island": 0}}
lm-evaluation/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_questions_subject_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_questions_subject_gap": 0}}
lm-evaluation/tests/testdata/blimp_wh_vs_that_no_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d1d3e439b2020ef5ed232bfebbcc9634adc5117e9eb61e38fdbbe2c8ea128d54
lm-evaluation/tests/testdata/blimp_wh_vs_that_with_gap-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d41a9b85e4c31e445bf9b46b8642df02203ccc02b4a9b254bf76066d5c54b4b7
lm-evaluation/tests/testdata/cb-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"cb": {"acc": 0.3392857142857143, "acc_stderr": 0.06384226561930825, "f1": 0.2819143819143819}}, "versions": {"cb": 0}}
lm-evaluation/tests/testdata/crows_pairs_english_gender-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2bf62b7cc678f64ffad4a6e6715ff76a2b984bfe8d1165da4b76b3b4dfafb2f9
lm-evaluation/tests/testdata/crows_pairs_french_religion-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_religion": {"likelihood_difference": 0.32691651640972225, "likelihood_difference_stderr": 0.021833493193249474, "pct_stereotype": 0.45217391304347826, "pct_stereotype_stderr": 0.046614569799583463}}, "versions": {"crows_pairs_french_religion": 0}}
lm-evaluation/tests/testdata/crows_pairs_french_sexual_orientation-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2ce823fdb93d325aa8fb40db5d335b093b4b69792763532d940a752440ee3a76
lm-evaluation/tests/testdata/crows_pairs_french_socioeconomic-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_socioeconomic": {"likelihood_difference": 0.3394681494647815, "likelihood_difference_stderr": 0.01702488895584347, "pct_stereotype": 0.4642857142857143, "pct_stereotype_stderr": 0.035714285714285705}}, "versions": {"crows_pairs_french_socioeconomic": 0}}
lm-evaluation/tests/testdata/cycle_letters-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ eb23f7d5de7528eefd8ed5f8054c402ff947319cccfef7195995946f99389201
lm-evaluation/tests/testdata/ethics_deontology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"ethics_deontology": {"acc": 0.503615127919911, "acc_stderr": 0.008338908432085105, "em": 0.07119021134593993}}, "versions": {"ethics_deontology": 0}}
lm-evaluation/tests/testdata/ethics_virtue-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 8021db8de46850090ddae6e6ec2d382029c3027b7c69884607503f916d09b709
lm-evaluation/tests/testdata/ethics_virtue-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"ethics_virtue": {"acc": 0.5035175879396985, "acc_stderr": 0.0070893491553555765, "em": 0.036180904522613064}}, "versions": {"ethics_virtue": 0}}
lm-evaluation/tests/testdata/gsm8k-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ e7292dbdd7fd8419ba954f2e0701e04c8d0e8842fe053dbf2fe47d926630e35e
lm-evaluation/tests/testdata/gsm8k-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"gsm8k": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"gsm8k": 0}}
lm-evaluation/tests/testdata/headqa_en-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 09da45119b12a0144e3081f8fb790c2a22af7b9c3aac42f54423d348a711fbf5
lm-evaluation/tests/testdata/headqa_en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"headqa_en": {"acc": 0.23559445660102116, "acc_norm": 0.2447118891320204, "acc_norm_stderr": 0.008211629406841468, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_en": 0}}
lm-evaluation/tests/testdata/hendrycksTest-college_biology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-college_biology": {"acc": 0.24305555555555555, "acc_norm": 0.2361111111111111, "acc_norm_stderr": 0.03551446610810826, "acc_stderr": 0.03586879280080341}}, "versions": {"hendrycksTest-college_biology": 0}}
lm-evaluation/tests/testdata/hendrycksTest-college_computer_science-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4ea26ad780290429ac5a3317559c154848d662bd40532c966458ba6f2a32d0a3
lm-evaluation/tests/testdata/hendrycksTest-elementary_mathematics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-elementary_mathematics": {"acc": 0.2724867724867725, "acc_norm": 0.2830687830687831, "acc_norm_stderr": 0.023201392938194978, "acc_stderr": 0.022930973071633345}}, "versions": {"hendrycksTest-elementary_mathematics": 0}}
lm-evaluation/tests/testdata/hendrycksTest-global_facts-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 9fdc85240b8170839278b1e883ee0868611d84dce202cb8aa037c841ec76d089
lm-evaluation/tests/testdata/hendrycksTest-high_school_european_history-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d8070e113be9d420fef5578cb69c70df4ea5118f9b18553023fd9efd5ff0b7f4
lm-evaluation/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 1c8b994bd9a63ec874fc8d0e3a27077118b7adc472306b2fd6c55635a78b9d52
lm-evaluation/tests/testdata/hendrycksTest-management-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-management": {"acc": 0.24271844660194175, "acc_norm": 0.2621359223300971, "acc_norm_stderr": 0.043546310772605956, "acc_stderr": 0.04245022486384495}}, "versions": {"hendrycksTest-management": 0}}
lm-evaluation/tests/testdata/hendrycksTest-moral_scenarios-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-moral_scenarios": {"acc": 0.2547486033519553, "acc_norm": 0.25251396648044694, "acc_norm_stderr": 0.014530330201468654, "acc_stderr": 0.014572650383409158}}, "versions": {"hendrycksTest-moral_scenarios": 0}}
lm-evaluation/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6983c560a562749f4f702249a3a6ae51fa495acc0643a980bf2cf52c6c5d4b95
lm-evaluation/tests/testdata/iwslt17-en-ar-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ b20adbcd2c6d135e28600b427113532c5df624cb3a90e8c5e48715c09a3a38fa
lm-evaluation/tests/testdata/lambada_cloze-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_cloze": 0}}
lm-evaluation/tests/testdata/lambada_openai-v2.0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai": "2.0"}}
lm-evaluation/tests/testdata/lambada_openai_mt_es-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4a88f4b316c72fe0396c382d6cbb33568ac4d0ad225150d3536635c085359fc9
lm-evaluation/tests/testdata/logiqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"logiqa": {"acc": 0.25806451612903225, "acc_norm": 0.2764976958525346, "acc_norm_stderr": 0.017543209075825194, "acc_stderr": 0.017162894755127077}}, "versions": {"logiqa": 0}}
lm-evaluation/tests/testdata/math_counting_and_prob-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 2aa9ae43ee9dbb2457525247d7b65358632c5eaa9cbfc40cf95a4f17f5d942ad
lm-evaluation/tests/testdata/math_intermediate_algebra-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0
lm-evaluation/tests/testdata/math_intermediate_algebra-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0
lm-evaluation/tests/testdata/math_num_theory-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ b920ccb507afdcf3ef6f4c04891913731e9f32ec914801791c6d9f8abf6e1897
lm-evaluation/tests/testdata/mathqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mathqa": {"acc": 0.20770519262981574, "acc_norm": 0.2050251256281407, "acc_norm_stderr": 0.007390619359738901, "acc_stderr": 0.007426217631188539}}, "versions": {"mathqa": 0}}
lm-evaluation/tests/testdata/mutual-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mutual": {"mrr": 0.5023513920240772, "mrr_stderr": 0.009501864812936679, "r@1": 0.22460496613995484, "r@1_stderr": 0.014028122493992806, "r@2": 0.4706546275395034, "r@2_stderr": 0.016778343895001414}}, "versions": {"mutual": 1}}