applied-ai-018 committed
Commit 50b2833 · verified · 1 parent: e48b510

Add files using upload-large-folder tool
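For context, the "upload-large-folder tool" named in the commit message is huggingface_hub's large-folder uploader, available both as a CLI (huggingface-cli upload-large-folder) and as a Python method. A minimal sketch, assuming a recent huggingface_hub; the repo id and local path are hypothetical placeholders:

# Minimal sketch of the uploader named in the commit message (assumes a recent
# huggingface_hub; repo id and folder path below are hypothetical placeholders).
from huggingface_hub import HfApi

api = HfApi()  # picks up the token stored by `huggingface-cli login`
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # hypothetical
    repo_type="model",
    folder_path="./lm-evaluation",  # hypothetical local folder
)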

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_chemistry.yaml +6 -0
  2. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml +6 -0
  3. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_macroeconomics.yaml +6 -0
  4. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml +6 -0
  5. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_psychology.yaml +6 -0
  6. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml +6 -0
  7. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_accounting.yaml +6 -0
  8. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_law.yaml +6 -0
  9. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_psychology.yaml +6 -0
  10. lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_public_relations.yaml +6 -0
  11. lm-evaluation/tests/testdata/arc_challenge-v2.0-res.json +1 -0
  12. lm-evaluation/tests/testdata/arithmetic_2da-v0-loglikelihood +1 -0
  13. lm-evaluation/tests/testdata/arithmetic_3da-v0-loglikelihood +1 -0
  14. lm-evaluation/tests/testdata/arithmetic_3ds-v0-loglikelihood +1 -0
  15. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_2-v0-res.json +1 -0
  16. lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood +1 -0
  17. lm-evaluation/tests/testdata/blimp_drop_argument-v0-res.json +1 -0
  18. lm-evaluation/tests/testdata/blimp_ellipsis_n_bar_2-v0-res.json +1 -0
  19. lm-evaluation/tests/testdata/blimp_left_branch_island_simple_question-v0-res.json +1 -0
  20. lm-evaluation/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood +1 -0
  21. lm-evaluation/tests/testdata/blimp_passive_2-v0-res.json +1 -0
  22. lm-evaluation/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood +1 -0
  23. lm-evaluation/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood +1 -0
  24. lm-evaluation/tests/testdata/blimp_tough_vs_raising_2-v0-res.json +1 -0
  25. lm-evaluation/tests/testdata/blimp_wh_vs_that_no_gap-v0-res.json +1 -0
  26. lm-evaluation/tests/testdata/cb-v1-res.json +1 -0
  27. lm-evaluation/tests/testdata/copa-v0-loglikelihood +1 -0
  28. lm-evaluation/tests/testdata/crows_pairs_english_religion-v0-loglikelihood +1 -0
  29. lm-evaluation/tests/testdata/crows_pairs_english_sexual_orientation-v0-res.json +1 -0
  30. lm-evaluation/tests/testdata/crows_pairs_french-v0-loglikelihood +1 -0
  31. lm-evaluation/tests/testdata/crows_pairs_french_gender-v0-loglikelihood +1 -0
  32. lm-evaluation/tests/testdata/gguf_test_8fcf3f2f52afeb2acd7c8e02c2cc3ce31a691b665d295f6c4e4bbd71c7caa1a2.pkl +3 -0
  33. lm-evaluation/tests/testdata/gpt3_test_0deb8e9bde8e8327bbc48157f638ff3ba06b0cd816dad2beb8ad90f7fbe795c7.pkl +3 -0
  34. lm-evaluation/tests/testdata/gpt3_test_8025023377febbd8c5f2b9f26705c394ff375d0cad7c89c10fd9b8e1eb66ff1c.pkl +3 -0
  35. lm-evaluation/tests/testdata/gpt3_test_cfd11f555a5a63b6dfa114a55a932e51b724cdd44d4842586b9ce37260bf7aaa.pkl +3 -0
  36. lm-evaluation/tests/testdata/gpt3_test_f307d52964c295e2005c5e782b688c24388e0cecadf29f1e6fc7f394236ea9c0.pkl +3 -0
  37. lm-evaluation/tests/testdata/headqa_es-v0-loglikelihood +1 -0
  38. lm-evaluation/tests/testdata/hendrycksTest-anatomy-v0-res.json +1 -0
  39. lm-evaluation/tests/testdata/hendrycksTest-astronomy-v0-res.json +1 -0
  40. lm-evaluation/tests/testdata/hendrycksTest-college_mathematics-v0-res.json +1 -0
  41. lm-evaluation/tests/testdata/hendrycksTest-high_school_world_history-v0-res.json +1 -0
  42. lm-evaluation/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood +1 -0
  43. lm-evaluation/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood +1 -0
  44. lm-evaluation/tests/testdata/hendrycksTest-management-v0-loglikelihood +1 -0
  45. lm-evaluation/tests/testdata/hendrycksTest-marketing-v0-res.json +1 -0
  46. lm-evaluation/tests/testdata/hendrycksTest-medical_genetics-v0-res.json +1 -0
  47. lm-evaluation/tests/testdata/hendrycksTest-professional_medicine-v0-loglikelihood +1 -0
  48. lm-evaluation/tests/testdata/hendrycksTest-sociology-v0-res.json +1 -0
  49. lm-evaluation/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood +1 -0
  50. lm-evaluation/tests/testdata/lambada_cloze-v0-loglikelihood +1 -0
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_chemistry.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "college_chemistry"
+ "description": "The following are multiple choice questions (with answers) about college\
+ \ chemistry.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_college_chemistry"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_college_physics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "college_physics"
+ "description": "The following are multiple choice questions (with answers) about college\
+ \ physics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_college_physics"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_macroeconomics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_macroeconomics"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school macroeconomics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_macroeconomics"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_mathematics.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_mathematics"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school mathematics.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_stem"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_mathematics"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_high_school_psychology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "high_school_psychology"
+ "description": "The following are multiple choice questions (with answers) about high\
+ \ school psychology.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_high_school_psychology"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_jurisprudence.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "jurisprudence"
+ "description": "The following are multiple choice questions (with answers) about jurisprudence.\n\
+ \n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_jurisprudence"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_accounting.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "professional_accounting"
+ "description": "The following are multiple choice questions (with answers) about professional\
+ \ accounting.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_other"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_professional_accounting"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_law.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "professional_law"
+ "description": "The following are multiple choice questions (with answers) about professional\
+ \ law.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_humanities"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_professional_law"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_professional_psychology.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "professional_psychology"
+ "description": "The following are multiple choice questions (with answers) about professional\
+ \ psychology.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_professional_psychology"
lm-evaluation/lm_eval/tasks/mmlu/flan_n_shot/loglikelihood/mmlu_public_relations.yaml ADDED
@@ -0,0 +1,6 @@
+ "dataset_name": "public_relations"
+ "description": "The following are multiple choice questions (with answers) about public\
+ \ relations.\n\n"
+ "group": "mmlu_flan_n_shot_loglikelihood_social_sciences"
+ "include": "_mmlu_flan_loglikelihood_template_yaml"
+ "task": "mmlu_flan_n_shot_loglikelihood_public_relations"
lm-evaluation/tests/testdata/arc_challenge-v2.0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arc_challenge": {"acc": 0.26621160409556316, "acc_norm": 0.28242320819112626, "acc_norm_stderr": 0.01315545688409722, "acc_stderr": 0.01291577478152323}}, "versions": {"arc_challenge": "2.0"}}
lm-evaluation/tests/testdata/arithmetic_2da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6ca1ca6ebd7cac4420d5005f7f35b0edbc921377f5e4f8874cc176e4fb6d79d4
lm-evaluation/tests/testdata/arithmetic_3da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ c421f9cd5a5001b80e528441da925128177a04db8526ebcdab543a90b33c9ce2
lm-evaluation/tests/testdata/arithmetic_3ds-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d3d8bad8827d4530945a1d8b3c7589c0235bbed0bc89e7561a6fdac678f6ce5c
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_2": 0}}
lm-evaluation/tests/testdata/blimp_determiner_noun_agreement_irregular_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7fab9f02e71a224ae7931aa77f8a9a61d887a7480756adc965d4746e97fb04a5
lm-evaluation/tests/testdata/blimp_drop_argument-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_drop_argument": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_drop_argument": 0}}
lm-evaluation/tests/testdata/blimp_ellipsis_n_bar_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_ellipsis_n_bar_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_ellipsis_n_bar_2": 0}}
lm-evaluation/tests/testdata/blimp_left_branch_island_simple_question-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_left_branch_island_simple_question": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_left_branch_island_simple_question": 0}}
lm-evaluation/tests/testdata/blimp_only_npi_licensor_present-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d2d0711611b5b218c6fa8c7278494749252b7868c396451919b761303556bd66
lm-evaluation/tests/testdata/blimp_passive_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_passive_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_passive_2": 0}}
lm-evaluation/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 973fe56534fdef1207f0fc08dd09a210304c55f33c6cbb17552754bf54f11c86
lm-evaluation/tests/testdata/blimp_tough_vs_raising_2-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d255a10a34f14d77d9526604a17b0f6747d32f62fc2e3a09e9ab10054535fd45
lm-evaluation/tests/testdata/blimp_tough_vs_raising_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_tough_vs_raising_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_tough_vs_raising_2": 0}}
lm-evaluation/tests/testdata/blimp_wh_vs_that_no_gap-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_vs_that_no_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_vs_that_no_gap": 0}}
lm-evaluation/tests/testdata/cb-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"cb": {"acc": 0.3392857142857143, "acc_stderr": 0.06384226561930825, "f1": 0.2819143819143819}}, "versions": {"cb": 1}}
lm-evaluation/tests/testdata/copa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 66276b9045b5300cba4b81340db06f674f031fa0b8883714ad0d03be464cd799
lm-evaluation/tests/testdata/crows_pairs_english_religion-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2ed57377174adaf0fb30037eb055eafdd02cd46e57bc32066d5fecd90a14b6e1
lm-evaluation/tests/testdata/crows_pairs_english_sexual_orientation-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"crows_pairs_english_sexual_orientation": {"likelihood_difference": 0.31947594049467243, "likelihood_difference_stderr": 0.024404952720497735, "pct_stereotype": 0.43010752688172044, "pct_stereotype_stderr": 0.051616798980291805}}, "versions": {"crows_pairs_english_sexual_orientation": 0}}
lm-evaluation/tests/testdata/crows_pairs_french-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4fb61dcf4d2c59d6470b297a01d5f429ee442864e225e1760fbf191b2a0901cd
lm-evaluation/tests/testdata/crows_pairs_french_gender-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 010b8404655911c86555616da23afffce9dc3981e1acbbfdb022d9c474430209
lm-evaluation/tests/testdata/gguf_test_8fcf3f2f52afeb2acd7c8e02c2cc3ce31a691b665d295f6c4e4bbd71c7caa1a2.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f79475c06a8800d8abef183b690409f304e0a6963681965f6caba1ca985b243
+ size 532
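The .pkl fixtures here are tracked with Git LFS, so the diff shows only the three-line pointer defined by the LFS spec v1: the spec URL, a sha256 oid of the actual blob, and its size in bytes. A small sketch of verifying a checked-out blob against its pointer; it assumes git lfs pull has already replaced the pointer with the real file:

# Sketch: recomputing the `oid sha256:...` value of a Git LFS pointer.
# Assumes `git lfs pull` has materialized the real file at this path.
import hashlib
import os

path = "lm-evaluation/tests/testdata/gguf_test_8fcf3f2f52afeb2acd7c8e02c2cc3ce31a691b665d295f6c4e4bbd71c7caa1a2.pkl"
h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest())          # should equal the pointer's oid
print(os.path.getsize(path))  # should equal the pointer's size (532)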
lm-evaluation/tests/testdata/gpt3_test_0deb8e9bde8e8327bbc48157f638ff3ba06b0cd816dad2beb8ad90f7fbe795c7.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7f7a190d338d1ef03f209a8a3340c0d282c73723633b8f5a71a8dc8ee94b9535
+ size 570
lm-evaluation/tests/testdata/gpt3_test_8025023377febbd8c5f2b9f26705c394ff375d0cad7c89c10fd9b8e1eb66ff1c.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:590805560ee790d530c075ad76633eb2e9749440083e0bab63489ff920fdfd33
+ size 70917
lm-evaluation/tests/testdata/gpt3_test_cfd11f555a5a63b6dfa114a55a932e51b724cdd44d4842586b9ce37260bf7aaa.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d531b0854314516cad7d56c7e28a694bf23072429147b235e9c6534492867bb2
+ size 2984
lm-evaluation/tests/testdata/gpt3_test_f307d52964c295e2005c5e782b688c24388e0cecadf29f1e6fc7f394236ea9c0.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f11de4b3d45d1590ba78935e824ae86ef75bbc370df500f89dde2c397d11c01a
+ size 1297
lm-evaluation/tests/testdata/headqa_es-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 767ca34d9714edd9fb030ddbcc35a64e5180d1e247b0cb557fbb22fdf971ad1f
lm-evaluation/tests/testdata/hendrycksTest-anatomy-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-anatomy": {"acc": 0.2222222222222222, "acc_norm": 0.23703703703703705, "acc_norm_stderr": 0.03673731683969506, "acc_stderr": 0.0359144408419697}}, "versions": {"hendrycksTest-anatomy": 0}}
lm-evaluation/tests/testdata/hendrycksTest-astronomy-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-astronomy": {"acc": 0.2565789473684211, "acc_norm": 0.29605263157894735, "acc_norm_stderr": 0.03715062154998904, "acc_stderr": 0.0355418036802569}}, "versions": {"hendrycksTest-astronomy": 0}}
lm-evaluation/tests/testdata/hendrycksTest-college_mathematics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-college_mathematics": {"acc": 0.18, "acc_norm": 0.2, "acc_norm_stderr": 0.04020151261036844, "acc_stderr": 0.038612291966536955}}, "versions": {"hendrycksTest-college_mathematics": 0}}
lm-evaluation/tests/testdata/hendrycksTest-high_school_world_history-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_world_history": {"acc": 0.23628691983122363, "acc_norm": 0.24472573839662448, "acc_norm_stderr": 0.02798569938703642, "acc_stderr": 0.027652153144159263}}, "versions": {"hendrycksTest-high_school_world_history": 0}}
lm-evaluation/tests/testdata/hendrycksTest-human_sexuality-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 4b07922fa1d549b655c21440b13d869263ce7dd9771d8147c450f11c91d26c10
lm-evaluation/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2e9449dd803f9e2334dc562d9f04031fd013ed36b883b44ab500533a5dbbface
lm-evaluation/tests/testdata/hendrycksTest-management-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 355489f4bd176ab84db5ef4c03d56ddeeeb1b0ad69827122b2d800e1cdc7e5f0
lm-evaluation/tests/testdata/hendrycksTest-marketing-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-marketing": {"acc": 0.2863247863247863, "acc_norm": 0.2905982905982906, "acc_norm_stderr": 0.029745048572674043, "acc_stderr": 0.029614323690456648}}, "versions": {"hendrycksTest-marketing": 0}}
lm-evaluation/tests/testdata/hendrycksTest-medical_genetics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-medical_genetics": {"acc": 0.27, "acc_norm": 0.29, "acc_norm_stderr": 0.04560480215720684, "acc_stderr": 0.0446196043338474}}, "versions": {"hendrycksTest-medical_genetics": 0}}
lm-evaluation/tests/testdata/hendrycksTest-professional_medicine-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7a30599858398169cde61430c18efdd7fb4dcd09c34aa9baba70f0f8cf17a9f1
lm-evaluation/tests/testdata/hendrycksTest-sociology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-sociology": {"acc": 0.23383084577114427, "acc_norm": 0.24875621890547264, "acc_norm_stderr": 0.030567675938916707, "acc_stderr": 0.02992941540834838}}, "versions": {"hendrycksTest-sociology": 0}}
lm-evaluation/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a1a338d0083a21054f74d36a296d6bd8e2e457327c0fd630bebcc61ed758044d
lm-evaluation/tests/testdata/lambada_cloze-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7655e748b63ae7e9911411d2d2a2577221d6c861ca4448509992541294d689f3