applied-ai-018 committed
Commit d31d8d7 · verified
Parent: 8b9d80c

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.
Files changed (50)
  1. lm-evaluation/tests/testdata/anagrams1-v0-greedy_until +1 -0
  2. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json +1 -0
  3. lm-evaluation/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json +1 -0
  4. lm-evaluation/tests/testdata/blimp_drop_argument-v0-loglikelihood +1 -0
  5. lm-evaluation/tests/testdata/blimp_ellipsis_n_bar_1-v0-loglikelihood +1 -0
  6. lm-evaluation/tests/testdata/blimp_ellipsis_n_bar_2-v0-loglikelihood +1 -0
  7. lm-evaluation/tests/testdata/blimp_existential_there_subject_raising-v0-res.json +1 -0
  8. lm-evaluation/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json +1 -0
  9. lm-evaluation/tests/testdata/blimp_npi_present_1-v0-res.json +1 -0
  10. lm-evaluation/tests/testdata/blimp_principle_A_domain_1-v0-res.json +1 -0
  11. lm-evaluation/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-res.json +1 -0
  12. lm-evaluation/tests/testdata/blimp_sentential_subject_island-v0-loglikelihood +1 -0
  13. lm-evaluation/tests/testdata/blimp_superlative_quantifiers_2-v0-res.json +1 -0
  14. lm-evaluation/tests/testdata/blimp_transitive-v0-res.json +1 -0
  15. lm-evaluation/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood +1 -0
  16. lm-evaluation/tests/testdata/boolq-v0-res.json +1 -0
  17. lm-evaluation/tests/testdata/crows_pairs_english-v0-loglikelihood +1 -0
  18. lm-evaluation/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json +1 -0
  19. lm-evaluation/tests/testdata/crows_pairs_french_autre-v0-res.json +1 -0
  20. lm-evaluation/tests/testdata/crows_pairs_french_disability-v0-loglikelihood +1 -0
  21. lm-evaluation/tests/testdata/crows_pairs_french_physical_appearance-v0-res.json +1 -0
  22. lm-evaluation/tests/testdata/drop-v0-greedy_until +1 -0
  23. lm-evaluation/tests/testdata/headqa-v0-loglikelihood +1 -0
  24. lm-evaluation/tests/testdata/headqa-v0-res.json +1 -0
  25. lm-evaluation/tests/testdata/hendrycksTest-college_medicine-v0-res.json +1 -0
  26. lm-evaluation/tests/testdata/hendrycksTest-college_physics-v0-loglikelihood +1 -0
  27. lm-evaluation/tests/testdata/hendrycksTest-formal_logic-v0-loglikelihood +1 -0
  28. lm-evaluation/tests/testdata/hendrycksTest-high_school_computer_science-v0-loglikelihood +1 -0
  29. lm-evaluation/tests/testdata/hendrycksTest-high_school_european_history-v0-res.json +1 -0
  30. lm-evaluation/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json +1 -0
  31. lm-evaluation/tests/testdata/hendrycksTest-high_school_psychology-v0-loglikelihood +1 -0
  32. lm-evaluation/tests/testdata/hendrycksTest-jurisprudence-v0-res.json +1 -0
  33. lm-evaluation/tests/testdata/hendrycksTest-logical_fallacies-v0-res.json +1 -0
  34. lm-evaluation/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood +1 -0
  35. lm-evaluation/tests/testdata/hendrycksTest-professional_accounting-v0-loglikelihood +1 -0
  36. lm-evaluation/tests/testdata/hendrycksTest-professional_accounting-v0-res.json +1 -0
  37. lm-evaluation/tests/testdata/hendrycksTest-virology-v0-loglikelihood +1 -0
  38. lm-evaluation/tests/testdata/iwslt17-ar-en-v0-greedy_until +1 -0
  39. lm-evaluation/tests/testdata/lambada_mt_de-v0-res.json +1 -0
  40. lm-evaluation/tests/testdata/lambada_mt_it-v0-res.json +1 -0
  41. lm-evaluation/tests/testdata/lambada_openai-v0-loglikelihood +1 -0
  42. lm-evaluation/tests/testdata/lambada_openai_cloze-v0-res.json +1 -0
  43. lm-evaluation/tests/testdata/lambada_openai_mt_en-v0-loglikelihood +1 -0
  44. lm-evaluation/tests/testdata/lambada_openai_mt_it-v0-res.json +1 -0
  45. lm-evaluation/tests/testdata/lambada_standard_cloze-v0-loglikelihood +1 -0
  46. lm-evaluation/tests/testdata/math_algebra-v0-res.json +1 -0
  47. lm-evaluation/tests/testdata/math_geometry-v0-res.json +1 -0
  48. lm-evaluation/tests/testdata/math_num_theory-v0-greedy_until +1 -0
  49. lm-evaluation/tests/testdata/math_precalc-v0-res.json +1 -0
  50. lm-evaluation/tests/testdata/math_precalc-v1-greedy_until +1 -0
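The `-res.json` fixtures added below are plain JSON with top-level "results" and "versions" keys (visible in the hunks that follow), while the `-loglikelihood` and `-greedy_until` files each hold a single checksum line. A minimal sketch of reading one of the result fixtures follows; the path is taken from the file list above, and the reader code itself is only illustrative, not part of this commit:

```python
import json

# Illustrative only: load one of the result fixtures added in this commit
# and print its metrics. The path comes from the file list above.
path = "lm-evaluation/tests/testdata/boolq-v0-res.json"

with open(path) as f:
    fixture = json.load(f)

for task, metrics in fixture["results"].items():
    version = fixture["versions"][task]
    acc = metrics["acc"]
    stderr = metrics["acc_stderr"]
    print(f"{task} (version {version}): acc={acc:.4f} +/- {stderr:.4f}")
```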
lm-evaluation/tests/testdata/anagrams1-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 7c0c5246d3f751f39119a5629ac1d4b2c6fd2a315f78d6de9b2c387e24e3fef1
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_with_adjective_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adjective_1": 0}}
lm-evaluation/tests/testdata/blimp_distractor_agreement_relational_noun-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_distractor_agreement_relational_noun": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_distractor_agreement_relational_noun": 0}}
lm-evaluation/tests/testdata/blimp_drop_argument-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 616109e63f162dcd31a632943e7ef0c9e0431afeb179e83e9b04b39007b16f5b
lm-evaluation/tests/testdata/blimp_ellipsis_n_bar_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d14e4b7fcdd68991eb39b9cf3ade4b37dee9ddd39b688f861d81a327e47a969f
lm-evaluation/tests/testdata/blimp_ellipsis_n_bar_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 0523771a217759f0b22b89807694ee7f6381ce98a584b1fd070ba96194a3273b
lm-evaluation/tests/testdata/blimp_existential_there_subject_raising-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_existential_there_subject_raising": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_existential_there_subject_raising": 0}}
lm-evaluation/tests/testdata/blimp_irregular_plural_subject_verb_agreement_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_irregular_plural_subject_verb_agreement_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_irregular_plural_subject_verb_agreement_1": 0}}
lm-evaluation/tests/testdata/blimp_npi_present_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_npi_present_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_npi_present_1": 0}}
lm-evaluation/tests/testdata/blimp_principle_A_domain_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_principle_A_domain_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_1": 0}}
lm-evaluation/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_sentential_negation_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_negation_npi_licensor_present": 0}}
lm-evaluation/tests/testdata/blimp_sentential_subject_island-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 80f5f98fad26240de2767fe58c4b18d864df41cbfa76f06c84c3fce9f14f4833
lm-evaluation/tests/testdata/blimp_superlative_quantifiers_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_superlative_quantifiers_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_superlative_quantifiers_2": 0}}
lm-evaluation/tests/testdata/blimp_transitive-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_transitive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_transitive": 0}}
lm-evaluation/tests/testdata/blimp_wh_vs_that_with_gap_long_distance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ eed67491bdf493a1dad8f1d9766bc7bd0e79946365b833c0f7eb81ac998e3dca
lm-evaluation/tests/testdata/boolq-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"boolq": {"acc": 0.5048929663608562, "acc_stderr": 0.00874463623355505}}, "versions": {"boolq": 0}}
lm-evaluation/tests/testdata/crows_pairs_english-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ee3ce1ddb8071d4189e5b06e7f3c618a434221ac52935d0f434c4d183f01458a
lm-evaluation/tests/testdata/crows_pairs_english_socioeconomic-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_socioeconomic": {"likelihood_difference": 0.3424577735757881, "likelihood_difference_stderr": 0.017459994170011896, "pct_stereotype": 0.46842105263157896, "pct_stereotype_stderr": 0.036297038088316094}}, "versions": {"crows_pairs_english_socioeconomic": 0}}
lm-evaluation/tests/testdata/crows_pairs_french_autre-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_autre": {"likelihood_difference": 0.3517045997290783, "likelihood_difference_stderr": 0.07647821858130377, "pct_stereotype": 0.23076923076923078, "pct_stereotype_stderr": 0.12162606385262997}}, "versions": {"crows_pairs_french_autre": 0}}
lm-evaluation/tests/testdata/crows_pairs_french_disability-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ fa1e5fc7492a66c9a90765e605003c38408347617db5ecf36706f1d374af5d42
lm-evaluation/tests/testdata/crows_pairs_french_physical_appearance-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_french_physical_appearance": {"likelihood_difference": 0.3221673223187262, "likelihood_difference_stderr": 0.026978346460100555, "pct_stereotype": 0.4027777777777778, "pct_stereotype_stderr": 0.05820650942569533}}, "versions": {"crows_pairs_french_physical_appearance": 0}}
lm-evaluation/tests/testdata/drop-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ ca566c630d8ac853d5785d4b5c40a5137172c34b48af3350e1f79e6d548b36ba
lm-evaluation/tests/testdata/headqa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f
lm-evaluation/tests/testdata/headqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"headqa": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa": 0}}
lm-evaluation/tests/testdata/hendrycksTest-college_medicine-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-college_medicine": {"acc": 0.27167630057803466, "acc_norm": 0.2543352601156069, "acc_norm_stderr": 0.0332055644308557, "acc_stderr": 0.03391750322321659}}, "versions": {"hendrycksTest-college_medicine": 0}}
lm-evaluation/tests/testdata/hendrycksTest-college_physics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 704a7671ef981fb95594782bc446dd632e87ebdbe89436a0603b714fb5786c75
lm-evaluation/tests/testdata/hendrycksTest-formal_logic-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ c0d0f0c008a5f3faf2f6f4268d87bbc09c40bb66ae08cf38eea0bf2e519c5a59
lm-evaluation/tests/testdata/hendrycksTest-high_school_computer_science-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 870d5a6300c527077aaf6baa3e750e75fa840b41657cf82549f39b768b14862d
lm-evaluation/tests/testdata/hendrycksTest-high_school_european_history-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_european_history": {"acc": 0.23636363636363636, "acc_norm": 0.24242424242424243, "acc_norm_stderr": 0.03346409881055953, "acc_stderr": 0.033175059300091805}}, "versions": {"hendrycksTest-high_school_european_history": 0}}
lm-evaluation/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_macroeconomics": {"acc": 0.2230769230769231, "acc_norm": 0.22564102564102564, "acc_norm_stderr": 0.021193632525148522, "acc_stderr": 0.021107730127244}}, "versions": {"hendrycksTest-high_school_macroeconomics": 0}}
lm-evaluation/tests/testdata/hendrycksTest-high_school_psychology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 0e4c8d13806d3696167e40544d2d114c557c10c74bc61fcb9c51bbfced0266ef
lm-evaluation/tests/testdata/hendrycksTest-jurisprudence-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-jurisprudence": {"acc": 0.25, "acc_norm": 0.3148148148148148, "acc_norm_stderr": 0.04489931073591312, "acc_stderr": 0.04186091791394607}}, "versions": {"hendrycksTest-jurisprudence": 0}}
lm-evaluation/tests/testdata/hendrycksTest-logical_fallacies-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-logical_fallacies": {"acc": 0.20245398773006135, "acc_norm": 0.2147239263803681, "acc_norm_stderr": 0.03226219377286774, "acc_stderr": 0.03157065078911902}}, "versions": {"hendrycksTest-logical_fallacies": 0}}
lm-evaluation/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ db6141246889a19dd3f6b9109f314d49c1a70f7a98795858804378b095c4a2fe
lm-evaluation/tests/testdata/hendrycksTest-professional_accounting-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 847418f7b22cd9b499e95fd73c40a2fbc40076895280cc2c560199c0c4c4f433
lm-evaluation/tests/testdata/hendrycksTest-professional_accounting-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-professional_accounting": {"acc": 0.2553191489361702, "acc_norm": 0.26595744680851063, "acc_norm_stderr": 0.026358065698880582, "acc_stderr": 0.026011992930902006}}, "versions": {"hendrycksTest-professional_accounting": 0}}
lm-evaluation/tests/testdata/hendrycksTest-virology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 0ffa491f7bad2abbb64ecd752a295729167599b3815238cab0ecf4cb08bba9b6
lm-evaluation/tests/testdata/iwslt17-ar-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ e94d310de91fad7ce36f4cf3305552020221482c5588f2efcefaa019893504f1
lm-evaluation/tests/testdata/lambada_mt_de-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_mt_de": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_de": 0}}
lm-evaluation/tests/testdata/lambada_mt_it-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_mt_it": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_it": 0}}
lm-evaluation/tests/testdata/lambada_openai-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation/tests/testdata/lambada_openai_cloze-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_cloze": 0}}
lm-evaluation/tests/testdata/lambada_openai_mt_en-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation/tests/testdata/lambada_openai_mt_it-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai_mt_it": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_it": 0}}
lm-evaluation/tests/testdata/lambada_standard_cloze-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ b604f00bc9f2a77ef41f8cfdb5a8509b3ae9266893b9e90abc665f5399ecba4e
lm-evaluation/tests/testdata/math_algebra-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_algebra": 0}}
lm-evaluation/tests/testdata/math_geometry-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_geometry": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_geometry": 0}}
lm-evaluation/tests/testdata/math_num_theory-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ b920ccb507afdcf3ef6f4c04891913731e9f32ec914801791c6d9f8abf6e1897
lm-evaluation/tests/testdata/math_precalc-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_precalc": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_precalc": 0}}
lm-evaluation/tests/testdata/math_precalc-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ bc834b06fd79473ca6fe38a51b714aad0bf0478c1b0eec787eca34dbdf69cb71