Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes.
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/dyck_languages.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/formal_fallacies.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_five_objects.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/movie_recommendation.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/ruin_names.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/sports_understanding.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_seven_objects.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_three_objects.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/word_sorting.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/boolean_expressions.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/causal_judgement.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/disambiguation_qa.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/dyck_languages.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/formal_fallacies.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/hyperbaton.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_five_objects.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_three_objects.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/navigate.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/object_counting.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/penguins_in_a_table.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/reasoning_about_colored_objects.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/ruin_names.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/sports_understanding.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/web_of_lies.yaml +5 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/README.md +48 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/_default_template_yaml +19 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml +4 -0
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/dyck_languages.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "dyck_languages"
+"description": "Correctly close a Dyck-n word.\n\n"
+"doc_to_text": "Q: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: [ { [\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: [ ; stack: [\n2: { ; stack: [ {\n3: [ ; stack: [ { [\nNow, we have reached the end. The final stack is \"[ { [\".\nWe will need to pop out \"[\", \"{\", \"[\" one by one in that order.\nSo, we need \"]\", \"}\", \"]\". So the answer is ] } ].\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: < ; stack: <\n2: > ; stack: empty\n3: ( ; stack: (\n4: ( ; stack: ( (\n5: [ ; stack: ( ( [\n6: [ ; stack: ( ( [ [\n7: ( ; stack: ( ( [ [ (\n8: { ; stack: ( ( [ [ ( {\n9: } ; stack: ( ( [ [ (\n10: ) ; stack: ( ( [ [\n11: [ ; stack: ( ( [ [ [\n12: < ; stack: ( ( [ [ [ <\n13: > ; stack: ( ( [ [ [\n14: ] ; stack: ( ( [ [\n15: ] ; stack: ( ( [\nNow, we have reached the end. The final stack is \"( ( [\".\nWe will need to pop out \"[\", \"(\", \"(\" one by one in that order.\nSo, we need \"]\", \")\", \")\". So the answer is ] ) ).\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < > ] > }\nA: Let's think step by step.\nWe should process each input one by one and keep track of the stack configuration.\n0: empty stack\n1: < ; stack: <\n2: [ ; stack: < [\n3: < ; stack: < [ <\n4: [ ; stack: < [ < [\n5: { ; stack: < [ < [ {\n6: < ; stack: < [ < [ { <\n7: [ ; stack: < [ < [ { < [\n8: ] ; stack: < [ < [ { <\n9: < ; stack: < [ < [ { < <\n10: { ; stack: < [ < [ { < < {\n11: } ; stack: < [ < [ { < <\n12: > ; stack: < [ < [ { <\n13: > ; stack: < [ < [ {\n14: } ; stack: < [ < [\n15: ] ; stack: < [ <\n16: > ; stack: < [\n17: { ; stack: < [ {\n18: { ; stack: < [ { {\n19: ( ; stack: < [ { { (\n20: ) ; stack: < [ { {\n21: } ; stack: < [ {\n22: { ; stack: < [ { {\n23: < ; stack: < [ { { <\n24: [ ; stack: < [ { { < [\n25: < ; stack: < [ { { < [ <\n26: > ; stack: < [ { { < [\n27: ] ; stack: < [ { { <\n28: > ; stack: < [ { {\n29: } ; stack: < [ {\nNow, we have reached the end. The final stack is \"< [ {\".\nWe will need to pop out \"{\", \"[\", \"<\" one by one in that order.\nSo, we need \"}\", \"]\", \">\". So the answer is } ] >.\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_dyck_languages"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/formal_fallacies.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "formal_fallacies"
+"description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
+"doc_to_text": "Q: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: To begin with, Lesley is a close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy. It follows that Lesley is a great-grandfather of Leroy.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Lesley is a close friend of Fernando: Lesley = friend(Fernando).\n(2) Being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy: If X = friend(Fernando) OR SCHOOLMATE(Lowell), then X = great-grandfather(Leroy).\nHypothesis: Does it follow that Lesley is a great-grandfather of Leroy: Lesley = great-grandfather(Leroy)?\nLet\u2019s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (1), we have Lesley = friend(Fernando). By (2), we have if Lesley = friend(Fernando), then Lesley = great-grandfather(Leroy).\nSo, it is true that Lesley is a great-grandfather of Leroy. So the answer is valid.\n\nQ: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: Whoever is not a great-grandfather of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde. We may conclude: Everyone who is an ancestor of Dana is a stepbrother of Brian, too.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Whoever is not a great-grandfather of Clyde is a stepbrother of Brian: If X = NOT (great-grandfather(Clyde)), then X = stepbrother(Brian).\n(2): Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde: If X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\nHypothesis: Does it follow that everyone who is an ancestor of Dana is a stepbrother of Brian, too: If X = ancestor(Dana), then X = stepbrother(Brian)?\nLet\u2019s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (2), we have if X = ancestor(Dana), X = NOT (great-grandfather(Clyde)).\nFurthermore, by (1), we have if X = NOT (great-grandfather(Clyde)), then X = stepbrother(Brian).\nBy the transitive relation rule in first-order logic, we then have: if X = ancestor(Dana), then X = stepbrother(Brian).\nSo, it is true that everyone who is an ancestor of Dana is a stepbrother of Brian. So the answer is valid.\n\nQ: \"It is not always easy to grasp who is consuming which products. The following argument pertains to this question: Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and, in the same time, a loyal buyer of Caress soap. It follows that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: Let's think step by step.\n(1) Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both: If X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin) OR X = loyal-buyer(Caress).\n(2): No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and a loyal buyer of Caress soap at the same time. If X = regular-consumer(Lush), then X = NOT (rare-consumer(Nioxin) AND loyal-buyer(Caress)).\nHypothesis: Does it follow that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap: If X = infrequent-user(Paul Mitchell), then X = NOT (regular-consumer(Lush))?\nLet\u2019s see whether the Hypothesis can be deduced from the arguments (1) and (2) by logical reasoning?\nBy (1), we have if X = infrequent-user(Paul Mitchell), then X = rare-consumer(Nioxin) OR X = loyal-buyer(Caress). We need to consider both cases separately:\nThe case X = rare-consumer(Nioxin) does not appear in (2).\nThe case X = loyal-buyer(Caress) does not appear in (2), either.\nSo, from (1) and (2), we cannot necessarily deduce the Hypothesis. So the answer is invalid.\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_formal_fallacies"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/geometric_shapes.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "geometric_shapes"
+"description": "Name geometric shapes from their SVG paths.\n\n"
+"doc_to_text": "Q: This SVG path element <path d=\"M 31.00,73.00 L 32.00,59.00 L 44.00,50.00 L 49.00,41.00 L 64.00,37.00 L 71.00,55.00 L 64.00,76.00 L 52.00,61.00 L 31.00,73.00\"/> draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 9 separate commands.\n(1) M 31.00,73.00: Move the current point to 31.00,73.00.\n(2) L 32.00,59.00: Create a line from 31.00,73.00 to 32.00,59.00.\n(3) L 44.00,50.00: Create a line from 32.00,59.00 to 44.00,50.00.\n(4) L 49.00,41.00: Create a line from 44.00,50.00 to 49.00,41.00.\n(5) L 64.00,37.00: Create a line from 49.00,41.00 to 64.00,37.00.\n(6) L 71.00,55.00: Create a line from 64.00,37.00 to 71.00,55.00.\n(7) L 64.00,76.00: Create a line from 71.00,55.00 to 64.00,76.00.\n(8) L 52.00,61.00: Create a line from 64.00,76.00 to 52.00,61.00.\n(9) L 31.00,73.00: Create a line from 52.00,61.00 to 31.00,73.00.\nThis SVG path starts at point 31.00,73.00, creates eight consecutive and touching lines, and then returns back its starting point, thereby creating an eight-sided shape. It does not have any curves or arches. \"octagon\" is the only eight-sided object on the list. So the answer is (F).\n\nQ: This SVG path element <path d=\"M 14.19,26.04 L 51.43,39.21 L 58.44,36.69 L 56.63,30.17 L 48.53,26.66 L 14.19,26.04\"/> draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 6 separate commands.\n(1) M 14.19,26.04: Move the current point to 14.19,26.04.\n(2) L 51.43,39.21: Create a line from 14.19,26.04 to 51.43,39.21.\n(3) L 58.44,36.69: Create a line from 51.43,39.21 to 58.44,36.69.\n(4) L 56.63,30.17: Create a line from 58.44,36.69 to 56.63,30.17.\n(5) L 48.53,26.66: Create a line from 56.63,30.17 to 48.53,26.66.\n(6) L 14.19,26.04: Create a line from 48.53,26.66 to 14.19,26.04.\nThis SVG path starts at point 14.19,26.04, creates five consecutive and touching lines, and then returns back its starting point, thereby creating a five-sided shape. It does not have any curves or arches. \"pentagon\" is the only five-sided polygon on the list. So the answer is (G).\n\nQ: This SVG path element <path d=\"M 41.00,43.00 L 37.00,34.00 L 41.00,33.00 L 45.00,34.00 L 41.00,43.00\"/> draws a\nOptions:\n(A) circle\n(B) heptagon\n(C) hexagon\n(D) kite\n(E) line\n(F) octagon\n(G) pentagon\n(H) rectangle\n(I) sector\n(J) triangle\nA: Let's think step by step.\nThis SVG path element contains \"M\" and \"L\" commands. M takes two parameters (x,y) and moves the current point to the coordinates (x,y). L takes two parameters (x,y) and draws a line from the previous coordinate to the new coordinate (x,y).\nThis path can be decomposed into 5 separate commands.\n(1) M 41.00,43.00: Move the current point to 41.00,43.00.\n(2) L 37.00,34.00: Create a line from 41.00,43.00 to 37.00,34.00.\n(3) L 41.00,33.00: Create a line from 37.00,34.00 to 41.00,33.00.\n(4) L 45.00,34.00: Create a line from 41.00,33.00 to 45.00,34.00.\n(5) L 41.00,43.00: Create a line from 45.00,34.00 to 41.00,43.00.\nThis SVG path starts at point 41.00,43.00, creates four consecutive and touching lines, and then returns back its starting point, thereby creating a four-sided shape. \"kite\" and \"rectangle\" are the only two four-sided polygons on the list. So, we need to determine which one is the correct answer.\nA kite has two pairs of equal-length adjacent sides, whereas a rectangle has two pairs of equal-length alternate (opposite) sides. Now, let's check whether the two adjacent sides of this shape are equal.\nLength of side A: |A| = sqrt((41.00-37.00)^2 + (43.00-34.00)^2) = sqrt((4)^2 + (9)^2) = sqrt(16 + 81) = sqrt(97).\nLength of side B: |B| = sqrt((37.00-41.00)^2 + (34.00-33.00)^2)) = sqrt((4)^2 + (1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side C: |C| = sqrt((41.00-45.00)^2 + (33.00-34.00)^2)) = sqrt((-4)^2 + (-1)^2) = sqrt(16 + 1) = sqrt(17).\nLength of side D: |D| = sqrt((45.00-41.00)^2 + (34.00-43.00)^2)) = sqrt((4)^2 + (-9)^2) = sqrt(16 + 81) = sqrt(97).\nNote that |A| = |D| and |B| = |C|. Furthermore, A and D are adjacent and B and C are adjacent. Thus, this polygon has two pairs of equal-length adjacent sides and is \"kite\". So the answer is (D).\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_geometric_shapes"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/logical_deduction_five_objects.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "logical_deduction_five_objects"
+"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
+"doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: Let's think step by step.\n(1) Eve finished above Amy: \"(above) ? Eve ? Amy ? (below)\".\n(2) Eli finished below Amy: \"(above) ? Amy ? Eli ? (below)\".\n(3) Combining (1) and (2) we get the following ordering: \"(above) Eve Amy Eli (below)\".\nAccording to this ordering, the person who finished last (the one at the bottom of this list) is Eli.\nEli finished last. So the answer is (B).\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: Let's think step by step.\n(1) The green book is to the right of the white book: \"(left) ? white ? green ? (right)\".\n(2) The orange book is the rightmost: \"(left) ? white ? green orange (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white green orange (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (A).\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: Let's think step by step.\n(1) The white book is to the left of the gray book: \"(left) ? white ? gray ? (right)\".\n(2) The red book is the second from the left: \"(left) ? white red gray ? (right)\".\n(3) Combining (1) and (2) we get the following ordering: \"(left) white red gray (right)\".\nAccording to this ordering, the leftmost book is the white book.\nThe white book is the leftmost. So the answer is (C).\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_logical_deduction_five_objects"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/movie_recommendation.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "movie_recommendation"
+"description": "Recommend movies similar to the given list of movies.\n\n"
+"doc_to_text": "Q: Find a movie similar to Star Wars Episode IV - A New Hope, Indiana Jones and the Last Crusade, Star Wars Episode V - The Empire Strikes Back, The Big Lebowski:\nOptions:\n(A) Tetsuo\n(B) the Ironman\n(C) The Princess Bride\n(D) The Barkley Marathons The Race That Eats Its Young\n(E) Bug\nA: Let's think step by step.\n- Star Wars Episode IV - A New Hope (action, adventure, fantasy; 1977)\n- Indiana Jones and the Last Crusade (action, adventure; 1989)\n- Star Wars Episode V - The Empire Strikes Back (action, adventure, fantasy; 1980)\n- The Big Lebowski (action, drama, comedy; 1998)\nThese are all famous classic American movies produced before 2000. Amongst all the options, the only movie similar to these ones seems to be The Princess Bride (1987). So the answer is (C).\n\nQ: Find a movie similar to Twister, The Silence of the Lambs, Independence Day, Braveheart:\nOptions:\n(A) They Shoot Horses\n(B) Don't They\n(C) Forrest Gump\n(D) The Salton Sea\n(E) Extreme Days\nA: Let's think step by step.\n- Twister (action, adventure, thriller; 1996)\n- The Silence of the Lambs (crime, drama, thriller; 1991)\n- Independence Day (action, science-fiction, drama; 1996)\n- Braveheart (biography, drama, epic; 1995)\nThese are all famous Hollywood movies produced around the 1990s. Amongst all the options, the only movie similar to these ones seems to be Forrest Gump (comedy, drama, romance; 1994). So the answer is (C).\n\nQ: Find a movie similar to Minority Report, Total Recall, Inside Out, Forrest Gump:\nOptions:\n(A) Phenomena\n(B) Lilting\n(C) Catwoman\n(D) Edge of Tomorrow\nA: Let's think step by step.\n- Minority Report (action, crime, mystery; 2002)\n- Total Recall (action, adventure, science-fiction; 2012)\n- Inside Out (animation, family, comedy; 2015)\n- Forrest Gump (comedy, drama, romance; 1994)\nThese are all famous movies produced in the past few decades.Amongst all the options, the only movie similar to these ones seems to be Edge of Tomorrow (action, adventure, crime, mystery; 2014), as it is also a science-fiction movie and features Tom Cruise. So the answer is (D).\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_movie_recommendation"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/ruin_names.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "ruin_names"
+"description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n"
+"doc_to_text": "Q: Which of the following is a humorous edit of this artist or movie name: 'whitesnake'?\nOptions:\n(A) whitesnape\n(B) whitesnapke\n(C) whitesnuake\n(D) mwhitesnake\nA: Let's think step by step.\nThe original name is \"whitesnake\". This is the name of an old English hard rock band. It is a compound word, formed by the words \"white\" and \"snake\".\n(A) \"whitesnape\": It is formed by the combination of \"white\" and \"snake\"; therefore, \"snake\" has been changed to \"snape\". Snape makes a reference to the fictional character Severus Snape in the Harry Potter series, so (A) is indeed a meaningful and funny edit.\n(B) \"whitesnapke\": It is formed by the combination of \"white\" and \"snapke\", but \"snapke\" is not an actual word; therefore, \"whitesnapke\" is not humorous.\n(C) \"whitesnuake\": It is formed by the combination of \"white\" and \"snuake\", but \"snuake\" is not an actual word; therefore, \"whitesnuake\" is not humorous.\n(D) \"mwhitesnake\": It is formed by the combination of \"m\", \"white\", and \"snake\", but the prefix \"-m \"seems arbitrary; therefore, \"mwhitesnake\" is not meaningful or humorous.\nAbove the above, the only humorous edit is (A). So the answer is (A).\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'one of our dinosaurs is missing'?\nOptions:\n(A) ofne of our dinosaurs is missing\n(B) one af our dinosaurs is missing\n(C) one of our dinosaurs is pissing\n(D) one of our dinosaur is missing\nA: Let's think step by step.\nThe original name is \"one of our dinosaurs is missing\". This is the name of an old British movie.\n(A) \"ofne of our dinosaurs is missing\": Here \"one of\" is changed to \"ofne\", but the word \"ofne\" is not an actual word.\n(B) \"one af our dinosaurs is missing\": Here the word \"of\" is changed to \"af\", but the word \"af\" is not an actual word.\n(C) \"one of our dinosaurs is pissing\": Here the word \"missing\" is changed to \"pissing\", and \"one of our dinosaurs is pissing\" is indeed a very whimsical and mischievous edit. This change truly ruins the original title of the movie.\n(D) \"one of our dinosaur is missing\": Here the word \"dinosaurs\" is changed to \"dinosaur\", but \"dinosaur\" is singular but should be plural in the title; this change therefore feels arbitrary and not humorous.\nAbove the above, the only humorous edit is (C).\nAbove the above, the only humorous edit is (C). So the answer is (C).\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'counting crows'?\nOptions:\n(A) countingy crows\n(B) counting cows\n(C) courting crows\n(D) coutnting crows\nA: Let's think step by step.\nThe original name is \"counting crows\". This is the name of an American rock band. Historically, the band name comes from the British nursery rhyme \"One for Sorrow\", which is about counting of magpies.\n(A) \"countingy crows\": Here the word \"counting\" is changed to \"countingy\", but the word \"countingy\" is not an actual word.\n(B) \"counting cows\": Here the word \"crows\" is changed to \"cows\", and this is indeed a playful and meaningful edit that ruins the original name of the band.\n(C) \"courting crows\": Here the word \"counting\" is changed to \"courting\", and \"courting\" is an actual word; however, \"courting crows\" does not sound as humorous as \"counting cows\".\n(D) \"coutnting crows\": Here the word \"counting\" is changed to \"coutnting\", but the word \"coutnting\" is not an actual word.\nAbove the above, the only humorous edit is (B). So the answer is (B).\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_ruin_names"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/sports_understanding.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "sports_understanding"
+"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n"
+"doc_to_text": "Q: Is the following sentence plausible? \"Bam Adebayo scored a reverse layup in the Western Conference Finals.\"\nA: Let's think step by step. Bam Adebayo is an American basketball player. Scoring a reverse layup in the Western Conference Finals is part of the NBA Finals. So the answer is yes.\n\nQ: Is the following sentence plausible? \"Santi Cazorla scored a touchdown.\"\nA: Let's think step by step. Santi Cazorla is a soccer player. Touchdown is part of American football and rugby. So the answer is no.\n\nQ: Is the following sentence plausible? \"DeMar DeRozan was called for the goal tend.\"\nA: Let's think step by step. DeMar DeRozan is an American basketball player. Goal tending is part of basketball. So the answer is yes.\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_sports_understanding"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_seven_objects.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "tracking_shuffled_objects_seven_objects"
+"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+"doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A).\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C).\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_tracking_shuffled_objects_seven_objects"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/tracking_shuffled_objects_three_objects.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "tracking_shuffled_objects_three_objects"
+"description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+"doc_to_text": "Q: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a yellow ball, Bob has a blue ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Claire and Alice swap balls. Then, Alice and Bob swap balls. Finally, Claire and Bob swap balls. At the end of the game, Bob has the\nOptions:\n(A) yellow ball\n(B) blue ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: yellow, Bob: blue, Claire: pink.\n(1) Claire and Alice swap balls: Alice: pink, Bob: blue, Claire: yellow.\n(2) Alice and Bob swap balls: Alice: blue, Bob: pink, Claire: yellow.\n(3) Claire and Bob swap balls: Alice: blue, Bob: yellow, Claire: pink.\nAt the end of the game, Bob has the yellow ball. So the answer is (A).\n\nQ: Alice, Bob, and Claire are playing a game. At the start of the game, they are each holding a ball: Alice has a white ball, Bob has a purple ball, and Claire has a pink ball.\nAs the game progresses, pairs of players trade balls. First, Bob and Alice swap balls. Then, Bob and Claire swap balls. Finally, Bob and Alice swap balls. At the end of the game, Alice has the\nOptions:\n(A) white ball\n(B) purple ball\n(C) pink ball\nA: Let's think step by step.\n(0) At the start: Alice: white, Bob: purple, Claire: pink.\n(1) Bob and Alice swap balls: Alice: purple, Bob: white, Claire: pink.\n(2) Bob and Claire swap balls: Alice: purple, Bob: pink, Claire: white.\n(3) Bob and Alice swap balls: Alice: pink, Bob: purple, Claire: white.\nAt the end of the game, Alice has the pink ball. So the answer is (C).\n\nQ: Alice, Bob, and Claire are dancers at a square dance. At the start of a song, they each have a partner: Alice is dancing with Lola, Bob is dancing with Rodrigo, and Claire is dancing with Patrick.\nThroughout the song, the dancers often trade partners. First, Alice and Bob switch partners. Then, Claire and Bob switch partners. Finally, Bob and Alice switch partners. At the end of the dance, Alice is dancing with\nOptions:\n(A) Lola\n(B) Rodrigo\n(C) Patrick\nA: Let's think step by step.\n(0) At the start: Alice: Lola, Bob: Rodrigo, Claire: Patrick.\n(1) Alice and Bob switch partners: Alice: Rodrigo, Bob: Lola, Claire: Patrick.\n(2) Claire and Bob switch partners: Alice: Rodrigo, Bob: Patrick, Claire: Lola.\n(3) Bob and Alice switch partners: Alice: Patrick, Bob: Rodrigo, Claire: Lola.\nAt the end of the dance, Alice is dancing with Patrick. So the answer is (C).\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_tracking_shuffled_objects_three_objects"
lm-evaluation-harness/lm_eval/tasks/bbh/cot_fewshot/word_sorting.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "word_sorting"
+"description": "Sort a list of words.\n\n"
+"doc_to_text": "Q: Sort the following words alphabetically: List: oven costume counterpart\nA: Let's think step by step.\nThe first letter: \"oven\": \"o\" (15). \"costume\": \"c\" (3). \"counterpart\": \"c\" (3). We now have: (3) [\"costume\" ? \"counterpart\"] < (15) \"oven\". Now let's sort this subpart [\"costume\" ? \"counterpart\"] by looking at their second letters.\nThe second letter: \"costume\": \"o\" (15). \"counterpart\": \"o\" (15). We now have: (15) [\"costume\" ? \"counterpart\"]. Now let's sort this subpart [\"costume\" ? \"counterpart\"] by looking at their third letters.\nThe third letter: \"costume\": \"s\" (19). \"counterpart\": \"u\" (21). We now have: (19) \"costume\" < (21) \"counterpart\". Hence, we have [\"costume\" < \"counterpart\"] < \"oven\". So the answer is costume counterpart oven.\n\nQ: Sort the following words alphabetically: List: hypochlorite ponderosa phone credulity\nA: Let's think step by step.\nThe first letter: \"hypochlorite\": \"h\" (8). \"ponderosa\": \"p\" (16). \"phone\": \"p\" (16). \"credulity\": \"c\" (3). We now have: (3) \"credulity\" < (8) \"hypochlorite\" < (16) [\"ponderosa\" ? \"phone\"]. Now let's sort this subpart [\"ponderosa\" ? \"phone\"] by looking at their second letters.\nThe second letter: \"ponderosa\": \"o\" (15). \"phone\": \"h\" (8). We now have: (8) \"phone\" < (15) \"ponderosa\". Hence, we have \"credulity\" < \"hypochlorite\" < [\"phone\" <\"ponderosa\"]. So the answer is credulity hypochlorite phone ponderosa.\n\nQ: Sort the following words alphabetically: List: newt arson parthia seismography mugho aspect census\nA: Let's think step by step.\nThe first letter: \"newt\": \"n\" (14). \"arson\": \"a\" (1). \"parthia\": \"p\" (16). \"seismography\": \"s\" (19). \"mugho\": \"m\" (13). \"aspect\": \"a\" (1). \"census\": \"c\" (3). We now have: (1) [\"arson\" ? \"aspect\"] < (3) \"census\" < (13) \"mugho\" < (14) \"newt\" < (16) \"parthia\" < (19) \"seismography\". Now let's sort this subpart [\"arson\" ? \"aspect\"] by looking at their second letters.\nThe second letter: \"arson\": \"r\" (18). \"aspect\": \"s\" (19). We now have: (18) \"arson\" < (19) \"aspect\". Hence, we have [\"arson\" < \"aspect\"] < \"census\" < \"mugho\" < \"newt\" < \"parthia\" < \"seismography\". So the answer is arson aspect census mugho newt parthia seismography.\n\nQ: {{input}}\nA: Let's think step by step.\n"
+"include": "_cot_fewshot_template_yaml"
+"task": "bbh_cot_fewshot_word_sorting"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/boolean_expressions.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "boolean_expressions"
+"description": "Evaluate the result of a random Boolean expression.\n\n"
+"doc_to_text": "Q: not ( ( not not True ) ) is\nA: False\n\nQ: True and False and not True and True is\nA: False\n\nQ: not not ( not ( False ) ) is\nA: True\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_boolean_expressions"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/causal_judgement.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "causal_judgement"
+"description": "Answer questions about causal attribution.\n\n"
+"doc_to_text": "Q: How would a typical person answer each of the following questions about causation?\nFrank T., had an ongoing dispute with his neighbor over a stretch of land and one day decided to shoot his neighbor in the body. Frank T. had no experience with guns, his hand slipped on the barrel of the gun, and the shot went wild. Nonetheless, the bullet bounced off a large boulder several feet away and hit the neighbor's body, causing significant injury. Did Frank T. intentionally shoot his neighbor in the body?\nOptions:\n- Yes\n- No\nA: No\n\nQ: How would a typical person answer each of the following questions about causation?\nSuzy and Billy are working on a project that is very important for our nation's security. The boss tells them both: \"Be sure that you are here at exactly 9 am. It is absolutely essential that you arrive at that time.\" Both Billy and Suzy arrive at 9 am. As it happens, there was a motion detector installed in the room where they arrived. The motion detector was set up to be triggered if at least one person appeared in the room at the same time. So the motion detector went off. Did Billy cause the motion detector to go off?\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: How would a typical person answer each of the following questions about causation?\nGeorge and his sister Lena reunite at their parents' house for Thanksgiving. Whereas George just got into medical school, Lena is unhappy in her marriage and recently lost her job. Over the course of the day, George and Lena get into a number of heated arguments. Later in the afternoon they play a game of darts. They split the first two games, and the third game is close until the end. Who will win comes down to George's last shot. If he hits a high point region, he wins; if he hits a low point region, Lena wins. George thinks of the difficult time Lena is having, and he really wants to let her win. He aims the dart at the low point region. He sets up his shot and the dart lands in the low point region. After his shot, Lena wins the game and is very happy. Did George hit the low point region intentionally?\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_causal_judgement"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/disambiguation_qa.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "disambiguation_qa"
+"description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n"
+"doc_to_text": "Q: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The chief told the counselor that they took the day off.\nOptions:\n(A) The chief took the day off\n(B) The counselor took the day off\n(C) Ambiguous\nA: (A)\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: The manager sent a message to the secretary, but he didn't reply yet.\nOptions:\n(A) The secretary didn't reply yet\n(B) The manager didn't reply yet\n(C) Ambiguous\nA: (A)\n\nQ: In the following sentences, explain the antecedent of the pronoun (which thing the pronoun refers to), or state that it is ambiguous.\nSentence: Bailey will plan to meet the director at his office\nOptions:\n(A) It will be Bailey's office\n(B) It will be the director's office\n(C) Ambiguous\nA: (C)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_disambiguation_qa"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/dyck_languages.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "dyck_languages"
+"description": "Correctly close a Dyck-n word.\n\n"
+"doc_to_text": "Q: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: [ { [\nA: ] } ]\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < > ( ( [ [ ( { } ) [ < > ] ]\nA: ] ) )\n\nQ: Complete the rest of the sequence, making sure that the parentheses are closed properly. Input: < [ < [ { < [ ] < { } > > } ] > { { ( ) } { < [ < > ] > }\nA: } ] >\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_dyck_languages"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/formal_fallacies.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "formal_fallacies"
+"description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
+"doc_to_text": "Q: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: To begin with, Lesley is a close friend of Fernando. Moreover, being a close friend of Fernando or a schoolmate of Lowell is sufficient for being a great-grandfather of Leroy. It follows that Lesley is a great-grandfather of Leroy.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: valid\n\nQ: \"It is not always easy to see who is related to whom -- and in which ways. The following argument pertains to this question: Whoever is not a great-grandfather of Clyde is a stepbrother of Brian. Being an ancestor of Dana is sufficient for not being a great-grandfather of Clyde. We may conclude: Everyone who is an ancestor of Dana is a stepbrother of Brian, too.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: valid\n\nQ: \"It is not always easy to grasp who is consuming which products. The following argument pertains to this question: Every infrequent user of Paul Mitchell shampoo is either a rare consumer of Nioxin shampoo or a loyal buyer of Caress soap, or both. No regular consumer of Lush soap is a rare consumer of Nioxin shampoo and, in the same time, a loyal buyer of Caress soap. It follows that whoever is an infrequent user of Paul Mitchell shampoo is not a regular consumer of Lush soap.\"\nIs the argument, given the explicitly stated premises, deductively valid or invalid?\nOptions:\n- valid\n- invalid\nA: invalid\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_formal_fallacies"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/hyperbaton.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "hyperbaton"
+"description": "Order adjectives correctly in English sentences.\n\n"
+"doc_to_text": "Q: Which sentence has the correct adjective order:\nOptions:\n(A) rubber terrible ship\n(B) terrible rubber ship\nA: (B)\n\nQ: Which sentence has the correct adjective order:\nOptions:\n(A) repulsive small Brazilian exercise ship\n(B) Brazilian repulsive exercise small ship\nA: (A)\n\nQ: Which sentence has the correct adjective order:\nOptions:\n(A) blue gold wonderful square shoe\n(B) wonderful square blue gold shoe\nA: (B)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_hyperbaton"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_five_objects.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "logical_deduction_five_objects"
+"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
+"doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_logical_deduction_five_objects"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/logical_deduction_three_objects.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "logical_deduction_three_objects"
+"description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
+"doc_to_text": "Q: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. In a golf tournament, there were three golfers: Amy, Eli, and Eve. Eve finished above Amy. Eli finished below Amy.\nOptions:\n(A) Amy finished last\n(B) Eli finished last\n(C) Eve finished last\nA: (B)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a white book, a green book, and an orange book. The green book is to the right of the white book. The orange book is the rightmost.\nOptions:\n(A) The white book is the leftmost\n(B) The green book is the leftmost\n(C) The orange book is the leftmost\nA: (A)\n\nQ: The following paragraphs each describe a set of three objects arranged in a fixed order. The statements are logically consistent within each paragraph. On a shelf, there are three books: a red book, a gray book, and a white book. The white book is to the left of the gray book. The red book is the second from the left.\nOptions:\n(A) The red book is the leftmost\n(B) The gray book is the leftmost\n(C) The white book is the leftmost\nA: (C)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_logical_deduction_three_objects"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/navigate.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "navigate"
+"description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n"
+"doc_to_text": "Q: If you follow these instructions, do you return to the starting point? Turn left. Turn around. Turn left. Take 7 steps. Take 2 steps. Take 4 steps. Take 8 steps.\nOptions:\n- Yes\n- No\nA: No\n\nQ: If you follow these instructions, do you return to the starting point? Turn around. Take 1 step. Take 6 steps. Turn around. Take 6 steps. Take 9 steps. Take 1 step.\nOptions:\n- Yes\n- No\nA: No\n\nQ: If you follow these instructions, do you return to the starting point? Always face forward. Take 2 steps right. Take 9 steps left. Take 7 steps right.\nOptions:\n- Yes\n- No\nA: Yes\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_navigate"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/object_counting.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "object_counting"
+"description": "Questions that involve enumerating objects and asking the model to count them.\n\n"
+"doc_to_text": "Q: I have a blackberry, a clarinet, a nectarine, a plum, a strawberry, a banana, a flute, an orange, and a violin. How many fruits do I have?\nA: 6\n\nQ: I have an orange, a raspberry, two peaches, a blackberry, an apple, a grape, a nectarine, and three plums. How many fruits do I have?\nA: 11\n\nQ: I have a lettuce head, a head of broccoli, an onion, a stalk of celery, two carrots, a garlic, and a yam. How many vegetables do I have?\nA: 8\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_object_counting"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/penguins_in_a_table.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "penguins_in_a_table"
+"description": "Answer questions about a table of penguins and their attributes.\n\n"
+"doc_to_text": "Q: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. We now add a penguin to the table:\nJames, 12, 90, 12\nHow many penguins are less than 8 years old?\nOptions:\n(A) 1\n(B) 2\n(C) 3\n(D) 4\n(E) 5\nA: (B)\n\nQ: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. Which is the youngest penguin?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: (B)\n\nQ: Here is a table where the first line is a header and each subsequent line is a penguin: name, age, height (cm), weight (kg) Louis, 7, 50, 11 Bernard, 5, 80, 13 Vincent, 9, 60, 11 Gwen, 8, 70, 15 For example: the age of Louis is 7, the weight of Gwen is 15 kg, the height of Bernard is 80 cm. What is the name of the second penguin sorted by alphabetic order?\nOptions:\n(A) Louis\n(B) Bernard\n(C) Vincent\n(D) Gwen\n(E) James\nA: (D)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_penguins_in_a_table"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/reasoning_about_colored_objects.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "reasoning_about_colored_objects"
+"description": "Answer extremely simple questions about the colors of objects on a surface.\n\n"
+"doc_to_text": "Q: On the nightstand, there is a red pencil, a purple mug, a burgundy keychain, a fuchsia teddy bear, a black plate, and a blue stress ball. What color is the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: (E)\n\nQ: On the table, you see a bunch of objects arranged in a row: a purple paperclip, a pink stress ball, a brown keychain, a green scrunchiephone charger, a mauve fidget spinner, and a burgundy pen. What is the color of the object directly to the right of the stress ball?\nOptions:\n(A) red\n(B) orange\n(C) yellow\n(D) green\n(E) blue\n(F) brown\n(G) magenta\n(H) fuchsia\n(I) mauve\n(J) teal\n(K) turquoise\n(L) burgundy\n(M) silver\n(N) gold\n(O) black\n(P) grey\n(Q) purple\n(R) pink\nA: (F)\n\nQ: On the nightstand, you see the following items arranged in a row: a teal plate, a burgundy keychain, a yellow scrunchiephone charger, an orange mug, a pink notebook, and a grey cup. How many non-orange items do you see to the left of the teal item?\nOptions:\n(A) zero\n(B) one\n(C) two\n(D) three\n(E) four\n(F) five\n(G) six\nA: (A)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_reasoning_about_colored_objects"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/ruin_names.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "ruin_names"
+"description": "Select the humorous edit that 'ruins' the input movie or musical artist name.\n\n"
+"doc_to_text": "Q: Which of the following is a humorous edit of this artist or movie name: 'whitesnake'?\nOptions:\n(A) whitesnape\n(B) whitesnapke\n(C) whitesnuake\n(D) mwhitesnake\nA: (A)\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'one of our dinosaurs is missing'?\nOptions:\n(A) ofne of our dinosaurs is missing\n(B) one af our dinosaurs is missing\n(C) one of our dinosaurs is pissing\n(D) one of our dinosaur is missing\nA: (C)\n\nQ: Which of the following is a humorous edit of this artist or movie name: 'counting crows'?\nOptions:\n(A) countingy crows\n(B) counting cows\n(C) courting crows\n(D) coutnting crows\nA: (B)\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_ruin_names"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/sports_understanding.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "sports_understanding"
+"description": "Determine whether an artificially constructed sentence relating to sports is plausible or not.\n\n"
+"doc_to_text": "Q: Is the following sentence plausible? \"Bam Adebayo scored a reverse layup in the Western Conference Finals.\"\nA: yes\n\nQ: Is the following sentence plausible? \"Santi Cazorla scored a touchdown.\"\nA: no\n\nQ: Is the following sentence plausible? \"DeMar DeRozan was called for the goal tend.\"\nA: yes\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_sports_understanding"
lm-evaluation-harness/lm_eval/tasks/bbh/fewshot/web_of_lies.yaml
ADDED
@@ -0,0 +1,5 @@
+"dataset_name": "web_of_lies"
+"description": "Evaluate a random boolean function expressed as a word problem.\n\n"
+"doc_to_text": "Q: Question: Fidel tells the truth. Jerry says Fidel tells the truth. Vina says Jerry tells the truth. Millicent says Vina lies. Raymond says Millicent lies. Does Raymond tell the truth?\nA: Yes\n\nQ: Question: Kristian lies. Millie says Kristian lies. Maybelle says Millie tells the truth. Fidel says Maybelle lies. Leda says Fidel lies. Does Leda tell the truth?\nA: Yes\n\nQ: Question: Kristian tells the truth. Michaela says Kristian lies. Raymond says Michaela tells the truth. Osvaldo says Raymond tells the truth. Jamey says Osvaldo tells the truth. Does Jamey tell the truth?\nA: No\n\nQ: {{input}}\nA:"
+"include": "_fewshot_template_yaml"
+"task": "bbh_fewshot_web_of_lies"
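Each BBH file above carries only its task-specific keys (`dataset_name`, `description`, `doc_to_text`, `task`) and pulls the shared settings from a base file named by `include` (`_cot_fewshot_template_yaml` or `_fewshot_template_yaml`, neither of which appears in this truncated 50-file view). As a minimal Python sketch of that override semantics, assuming PyYAML and deliberately ignoring the details of the harness's real loader:

```python
import os
import yaml

def load_task_config(path: str) -> dict:
    """Sketch of the `include` composition used by the YAML files above.

    The actual lm-evaluation-harness loader handles more cases; this only
    illustrates that a task's own keys are layered over the shared template
    it names via `include`.
    """
    with open(path, encoding="utf-8") as f:
        config = yaml.safe_load(f)
    base_name = config.pop("include", None)
    if base_name:
        # The template is assumed to live next to the task file.
        base_path = os.path.join(os.path.dirname(path), base_name)
        with open(base_path, encoding="utf-8") as f:
            base = yaml.safe_load(f)
        base.update(config)  # task-specific keys win over template keys
        config = base
    return config

# e.g. load_task_config("lm_eval/tasks/bbh/fewshot/web_of_lies.yaml")
```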
lm-evaluation-harness/lm_eval/tasks/cmmlu/README.md
ADDED
@@ -0,0 +1,48 @@
+# CMMLU
+
+### Paper
+
+CMMLU: Measuring massive multitask language understanding in Chinese
+https://arxiv.org/abs/2306.09212
+
+CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of the Chinese language and culture.
+CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.
+
+Homepage: https://github.com/haonan-li/CMMLU
+
+### Citation
+
+```bibtex
+@misc{li2023cmmlu,
+      title={CMMLU: Measuring massive multitask language understanding in Chinese},
+      author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
+      year={2023},
+      eprint={2306.09212},
+      archivePrefix={arXiv},
+      primaryClass={cs.CL}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+- `cmmlu`: All 67 subjects of the CMMLU dataset, evaluated following the methodology in MMLU's original implementation.
+
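+As a minimal, illustrative sketch of running the group or a single subject (this assumes the `lm_eval.simple_evaluate` Python entrypoint from lm-evaluation-harness v0.4+; the checkpoint name is only a placeholder):
+
+```python
+# Hedged usage sketch: evaluate one CMMLU subject via lm-evaluation-harness.
+# Assumes lm_eval exposes simple_evaluate; swap in any HF checkpoint you like.
+import lm_eval
+
+results = lm_eval.simple_evaluate(
+    model="hf",                                 # Hugging Face backend
+    model_args="pretrained=Qwen/Qwen1.5-0.5B",  # placeholder model name
+    tasks=["cmmlu_agronomy"],                   # or ["cmmlu"] for all 67 subjects
+    num_fewshot=5,                              # fewshot examples come from the dev split
+)
+print(results["results"]["cmmlu_agronomy"])
+```
+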
+#### Tasks
+
+
+The following tasks evaluate subjects in the CMMLU dataset using loglikelihood-based multiple-choice scoring (see the sketch after this list):
+- `cmmlu_{subject_english}`
+
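+Here, "loglikelihood-based multiple-choice scoring" means the model assigns a loglikelihood to each candidate answer as a continuation of the prompt, and the highest-scoring candidate is taken as the prediction. A rough sketch of the selection rule follows (made-up scores, not the harness source; `acc_norm` additionally normalizes by the byte length of each choice):
+
+```python
+# Illustrative selection rule for multiple_choice tasks.
+choices = ["A", "B", "C", "D"]
+loglikelihoods = [-4.2, -1.3, -3.8, -2.9]  # fabricated per-choice scores
+
+# acc: argmax over raw loglikelihoods.
+pred = max(zip(loglikelihoods, choices))[1]
+
+# acc_norm: argmax over byte-length-normalized loglikelihoods.
+pred_norm = max((ll / len(c.encode("utf-8")), c)
+                for ll, c in zip(loglikelihoods, choices))[1]
+
+print(pred, pred_norm)  # both "B" here, since every choice is one byte long
+```
+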
+### Checklist
+
+* [x] Is the task an existing benchmark in the literature?
+  * [x] Have you referenced the original paper that introduced the task?
+  * [x] If yes, does the original paper provide a reference implementation?
+    * [x] Yes, original implementation contributed by author of the benchmark
+
+If other tasks on this dataset are already supported:
+* [x] Is the "Main" variant of this task clearly denoted?
+* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation-harness/lm_eval/tasks/cmmlu/_default_template_yaml
ADDED
@@ -0,0 +1,19 @@
+group: cmmlu
+dataset_path: haonan-li/cmmlu
+test_split: test
+fewshot_split: dev
+fewshot_config:
+  sampler: first_n
+output_type: multiple_choice
+doc_to_text: "{{Question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
+doc_to_choice: ["A", "B", "C", "D"]
+doc_to_target: "{{['A', 'B', 'C', 'D'].index(Answer)}}"
+metric_list:
+  - metric: acc
+    aggregation: mean
+    higher_is_better: true
+  - metric: acc_norm
+    aggregation: mean
+    higher_is_better: true
+metadata:
+  version: 0.0
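The template above is shared by every per-subject config that follows: each subject file only overrides `dataset_name`, the Chinese instruction in `description` (roughly, "The following are single-answer multiple-choice questions about <subject>; please give the option of the correct answer directly."), and the `task` name. To see what the Jinja fields render to, here is a minimal sketch with a made-up record (field names match the dataset columns referenced above; this is not the harness's own rendering code):

```python
# Hypothetical CMMLU-style record; "答案:" means "Answer:".
doc = {
    "Question": "太阳系中最大的行星是?",  # "Which planet in the solar system is largest?"
    "A": "地球", "B": "木星", "C": "火星", "D": "金星",
    "Answer": "B",
}

# Equivalent of doc_to_text: question plus lettered options, ending in "答案:".
prompt = (f"{doc['Question'].strip()}\nA. {doc['A']}\nB. {doc['B']}"
          f"\nC. {doc['C']}\nD. {doc['D']}\n答案:")

# Equivalent of doc_to_target: index of the gold letter within doc_to_choice.
target = ["A", "B", "C", "D"].index(doc["Answer"])  # -> 1
print(prompt)
print(target)
```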
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_agronomy.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "agronomy"
+"description": "以下是关于农学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_agronomy"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_ancient_chinese.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "ancient_chinese"
+"description": "以下是关于古汉语的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_ancient_chinese"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_arts.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "arts"
+"description": "以下是关于艺术学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_arts"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "astronomy"
+"description": "以下是关于天文学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_astronomy"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_business_ethics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "business_ethics"
+"description": "以下是关于商业伦理的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_business_ethics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_foreign_policy.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_foreign_policy"
+"description": "以下是关于中国外交政策的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_foreign_policy"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_chinese_history.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "chinese_history"
+"description": "以下是关于中国历史的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_chinese_history"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_education.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "college_education"
+"description": "以下是关于大学教育学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_education"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_engineering_hydrology.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "college_engineering_hydrology"
+"description": "以下是关于大学工程水文学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_engineering_hydrology"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "college_mathematics"
+"description": "以下是关于大学数学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_mathematics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "college_medical_statistics"
+"description": "以下是关于大学医学统计的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_medical_statistics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_college_medicine.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "college_medicine"
+"description": "以下是关于大学医学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_college_medicine"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_computer_science.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "computer_science"
+"description": "以下是关于计算机科学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_computer_science"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_computer_security.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "computer_security"
+"description": "以下是关于计算机安全的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_computer_security"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "conceptual_physics"
+"description": "以下是关于概念物理学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_conceptual_physics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_economics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "economics"
+"description": "以下是关于经济学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_economics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_commonsense.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "elementary_commonsense"
+"description": "以下是关于小学常识的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_elementary_commonsense"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_information_and_technology.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "elementary_information_and_technology"
+"description": "以下是关于小学信息技术的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_elementary_information_and_technology"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_elementary_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "elementary_mathematics"
+"description": "以下是关于初等数学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_elementary_mathematics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_food_science.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "food_science"
+"description": "以下是关于食品科学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_food_science"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_genetics.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "genetics"
+"description": "以下是关于遗传学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_genetics"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_biology"
+"description": "以下是关于高中生物的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_high_school_biology"
lm-evaluation-harness/lm_eval/tasks/cmmlu/cmmlu_default_high_school_chemistry.yaml
ADDED
@@ -0,0 +1,4 @@
+"dataset_name": "high_school_chemistry"
+"description": "以下是关于高中化学的单项选择题,请直接给出正确答案的选项。\n\n"
+"include": "_default_template_yaml"
+"task": "cmmlu_high_school_chemistry"