applied-ai-018 committed
Commit 720eea2 · verified · 1 Parent(s): 2ac78d1

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc +0 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/causal_judgement.yaml +16 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/date_understanding.yaml +18 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/disambiguation_qa.yaml +18 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/formal_fallacies.yaml +16 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/geometric_shapes.yaml +18 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/hyperbaton.yaml +18 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/logical_deduction_five_objects.yaml +17 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/logical_deduction_three_objects.yaml +17 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/movie_recommendation.yaml +17 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/multistep_arithmetic_two.yaml +16 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/navigate.yaml +15 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/object_counting.yaml +15 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/penguins_in_a_table.yaml +17 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/snarks.yaml +17 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/temporal_sequences.yaml +17 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/tracking_shuffled_objects_seven_objects.yaml +17 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/utils.py +224 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/mathqa/README.md +50 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/mathqa/mathqa.yaml +22 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/mathqa/utils.py +9 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/README.md +70 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml +27 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml +3 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml +3 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml +3 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml +3 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml +3 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml +3 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/minerva_math/utils.py +309 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_template_yaml +14 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-itself.yaml +4 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-ais.yaml +4 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-versions.yaml +4 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-less-HHH.yaml +4 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-neutral-HHH.yaml +4 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-myopic-reward.yaml +4 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-one-box-tendency.yaml +4 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-power-seeking-inclination.yaml +4 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-general-ai.yaml +4 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-good-text-model.yaml +4 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-text-model.yaml +4 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-architecture.yaml +4 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-wealth-seeking-inclination.yaml +4 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-versions.yaml +4 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-less-HHH.yaml +4 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-neutral-HHH.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-one-box-tendency.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-power-seeking-inclination.yaml +4 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-general-ai.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (11.6 kB).
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/causal_judgement.yaml ADDED
@@ -0,0 +1,16 @@
+ "dataset_name": "causal_judgement"
+ "description": "Answer questions about causal attribution.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_causal_judgement"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: 0
+         regex_pattern: "\\b(Yes|No|yes|no)\\b"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/date_understanding.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "date_understanding"
+ "description": "Infer the date from context.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_date_understanding"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/disambiguation_qa.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "disambiguation_qa"
+ "description": "Clarify the meaning of sentences with ambiguous pronouns.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_disambiguation_qa"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/formal_fallacies.yaml ADDED
@@ -0,0 +1,16 @@
+ "dataset_name": "formal_fallacies"
+ "description": "Distinguish deductively valid arguments from formal fallacies.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_formal_fallacies"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: 0
+         regex_pattern: "\\b(valid|invalid)\\b"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/geometric_shapes.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "geometric_shapes"
+ "description": "Name geometric shapes from their SVG paths.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_geometric_shapes"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/hyperbaton.yaml ADDED
@@ -0,0 +1,18 @@
+ "dataset_name": "hyperbaton"
+ "description": "Order adjectives correctly in English sentences.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_hyperbaton"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/logical_deduction_five_objects.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "logical_deduction_five_objects"
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_logical_deduction_five_objects"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/logical_deduction_three_objects.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "logical_deduction_three_objects"
+ "description": "A logical deduction task which requires deducing the order of a sequence of objects.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_logical_deduction_three_objects"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/movie_recommendation.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "movie_recommendation"
+ "description": "Recommend movies similar to the given list of movies.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_movie_recommendation"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/multistep_arithmetic_two.yaml ADDED
@@ -0,0 +1,16 @@
+ "dataset_name": "multistep_arithmetic_two"
+ "description": "Solve multi-step arithmetic problems.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_multistep_arithmetic_two"
+
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.NumberParseRegexFilter
+         group_select: 0
+         regex_pattern: "([-0-9]+)"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/navigate.yaml ADDED
@@ -0,0 +1,15 @@
+ "dataset_name": "navigate"
+ "description": "Given a series of navigation instructions, determine whether one would end up back at the starting point.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_navigate"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: "regex"
+         group_select: 0
+         regex_pattern: "\\b(Yes|No|yes|no)\\b"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/object_counting.yaml ADDED
@@ -0,0 +1,15 @@
+ "dataset_name": "object_counting"
+ "description": "Questions that involve enumerating objects and asking the model to count them.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_object_counting"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.NumberParseRegexFilter
+         group_select: 0
+         regex_pattern: "([-0-9]+)"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "penguins_in_a_table"
+ "description": "Answer questions about a table of penguins and their attributes.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_penguins_in_a_table"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/snarks.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "snarks"
+ "description": "Determine which of two sentences is sarcastic.\n\nAccording to Cambridge University Dictionary, sarcasm is \"the use of remarks that clearly mean the opposite of what they say, made in order to hurt someone's feelings or to criticize something in a humorous way.\" Sarcastic sentences often contain satirical or ironic utterances, hyperboles, ambivalent or witty remarks.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_snarks"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/temporal_sequences.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "temporal_sequences"
+ "description": "Task description: Answer questions about which times certain events could have occurred.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_temporal_sequences"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/tracking_shuffled_objects_seven_objects.yaml ADDED
@@ -0,0 +1,17 @@
+ "dataset_name": "tracking_shuffled_objects_seven_objects"
+ "description": "A task requiring determining the final positions of a set of objects given their initial positions and a description of a sequence of swaps.\n\n"
+ "doc_to_text": "Q: {{input}}\nA:"
+ "include": "_zeroshot_template_yaml"
+ "task": "bbh_zeroshot_tracking_shuffled_objects_seven_objects"
+ filter_list:
+   - name: "strict-match"
+     filter:
+       - function: "take_first"
+   - name: "flexible-extract"
+     filter:
+       - function: !function utils.MultiChoiceRegexFilter
+         group_select: 0
+         ignore_case: true
+         ignore_punctuation: true
+         regex_pattern: "(\\([A-Z]\\))"
+       - function: "take_first"
lm-evaluation/build/lib/lm_eval/tasks/bbh/zeroshot/utils.py ADDED
@@ -0,0 +1,224 @@
+ import collections
+ import re
+ import sys
+ import unicodedata
+
+ from lm_eval.filters.extraction import Filter, RegexFilter
+
+
+ class ExtendedRegexFilter(RegexFilter):
+     punct_tbl = dict.fromkeys(
+         i for i in range(sys.maxunicode) if unicodedata.category(chr(i)).startswith("P")
+     )
+
+     def __init__(
+         self,
+         regex_pattern: str = r"#### (\-?[0-9\.\,]+)",
+         group_select=0,
+         fallback: str = "[invalid]",
+         ignore_case=False,
+         ignore_punctuation=False,
+         regexes_to_ignore=None,
+     ) -> None:
+         super().__init__(regex_pattern, group_select, fallback)
+         self.ignore_case = ignore_case
+         self.ignore_punctuation = ignore_punctuation
+         self.regexes_to_ignore = regexes_to_ignore
+
+     def filter_ignores(self, st):
+         if self.regexes_to_ignore is not None:
+             for s in self.regexes_to_ignore:
+                 st = re.sub(s, "", st)
+
+         if self.ignore_case:
+             st = st.lower()
+
+         if self.ignore_punctuation:
+             # https://stackoverflow.com/a/266162
+             st = st.translate(self.punct_tbl)
+         return st
+
+     def find_match(self, regex, resp, convert_dict={}):
+         match = regex.findall(resp)
+         if match:
+             match = match[self.group_select]
+             if isinstance(match, tuple):
+                 match = [m for m in match if m][0]
+             match = match.strip()
+             if match and match in convert_dict:
+                 match = convert_dict[match]
+         return match
+
+
+ class MapRegexFilter(ExtendedRegexFilter):
+     def __init__(
+         self,
+         regex_pattern_to_value: dict = {},
+         group_select=0,
+         fallback: str = "[invalid]",
+         ignore_case=False,
+         ignore_punctuation=False,
+         regexes_to_ignore=None,
+     ) -> None:
+         """
+         regex_pattern_to_value: Match the regex pattern and change the result into the value
+         group_select: Selects the (group_select)th match from the findall result. We use the whole regex_patterns, concatenated by |
+         ignore_case: Lowers the case of response before matching with the given regex
+         ignore_punctuation: Remove the punctuation before matching with the given regex
+         regexes_to_ignore: Remove these regexes before matching with the given regex
+         """
+         super().__init__(
+             "|".join(list(regex_pattern_to_value.keys())),
+             group_select,
+             fallback,
+             ignore_case,
+             ignore_punctuation,
+             regexes_to_ignore,
+         )
+         self.regex_to_value = {
+             re.compile(r): v for r, v in regex_pattern_to_value.items()
+         }
+
+     def apply(self, resps, docs):
+         filtered_resps = []
+
+         for r in resps:
+             filtered = []
+             for resp in r:
+                 whole_match_considering_group_select = self.find_match(
+                     self.regex, self.filter_ignores(resp)
+                 )
+                 if whole_match_considering_group_select:
+                     for regex, mapped_value in self.regex_to_value.items():
+                         match = self.find_match(
+                             regex,
+                             self.filter_ignores(whole_match_considering_group_select),
+                         )
+                         if match:
+                             match = mapped_value
+                             break
+                 if not whole_match_considering_group_select or not match:
+                     match = self.fallback
+
+                 filtered.append(match)
+             filtered_resps.append(filtered)
+
+         return filtered_resps
+
+
+ class NumberParseRegexFilter(ExtendedRegexFilter):
+     def apply(self, resps, docs):
+         # here, we assume we have a list, in which each element is
+         # a list of model responses for some particular input/target pair.
+         # so we process each of these (same input/target response sets)
+         # independently (and keep them a list.)
+         filtered_resps = []
+         import regex
+         from word2number import w2n
+
+         # https://www.reddit.com/r/regex/comments/11a38uk/parsing_numbers_written_out_as_english_words
+         english_number_regex = regex.compile(
+             "((?:(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?|teen|ty)|eight(?:|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion)(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?:|teen|ty)|eight(?|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion|[^\S\r\n]|,|and|&)+)?(?:zero|one|two|three|four|five|(?:twen|thir|for|fif|six|seven|nine)(?|teen|ty)|eight(?|een|y)|ten|eleven|twelve|fourteen|hundred|thousand|(?:m|b|tr)illion))"
+         )
+
+         for r in resps:
+             filtered = []
+             for resp in r:
+                 match = self.find_match(self.regex, resp)
+                 if not match:
+                     match = self.find_match(english_number_regex, resp.lower())
+                     if match:
+                         match = str(w2n.word_to_num(match))
+                 if not match:
+                     match = self.fallback
+                 filtered.append(match)
+             filtered_resps.append(filtered)
+
+         return filtered_resps
+
+
+ class WordSortFilter(Filter):
+     """ """
+
+     def apply(self, resps, docs):
+         filtered_resps = []
+
+         for r, doc in zip(resps, docs):
+             words = doc["input"].split("List:")[1].strip().split()
+             regex = re.compile("|".join([f"\\b{w}\\b" for w in words]))
+             filtered = []
+             for resp in r:
+                 match = regex.findall(resp)
+                 match.reverse()
+                 ordered_words = reversed(
+                     collections.OrderedDict(zip(match, [None] * len(match)))
+                 )
+                 filtered.append(" ".join(ordered_words))
+             filtered_resps.append(filtered)
+
+         return filtered_resps
+
+
+ class MultiChoiceRegexFilter(ExtendedRegexFilter):
+     def __init__(self, *args, **kwargs):
+         """
+         regex_pattern: The basic regex pattern to use. If fails to match, we will use the customized match procedure
+             - step 1 : We parse the choices between ([A-Z])s then try to find these choices in the response.
+             - step 2 : We parse the choice with regex :[\s]*([A-?]), where ? varies by number of choices.
+         group_select: Selects the (group_select)th match from the findall result.
+         ignore_case: Ignores the case during step 1 matching
+         ignore_punctuation: Remove the punctuation during step 1 matching
+         regexes_to_ignore: Remove these regexes during step 1 matching
+         """
+         super().__init__(*args, **kwargs)
+
+     def apply(self, resps, docs):
+         # here, we assume we have a list, in which each element is
+         # a list of model responses for some particular input/target pair.
+         # so we process each of these (same input/target response sets)
+         # independently (and keep them a list.)
+
+         filtered_resps = []
+
+         for r, doc in zip(resps, docs):
+             fallback_regexes = []
+             choice_to_alpha = {}
+             next_alpha = "A"
+
+             without_paren_fallback_regexes = []
+             without_paren_to_target = {}
+
+             multiple_choices_regex = re.compile(r"\([A-Z]\)([^\n^(]*)")
+             match = multiple_choices_regex.findall(doc["input"])
+             for m in match:
+                 m = self.filter_ignores(m.strip())
+                 fallback_regexes.append(f"{re.escape(m)}")
+                 choice_to_alpha[m] = f"({next_alpha})"
+
+                 without_paren_fallback_regexes.append(next_alpha)
+                 without_paren_to_target[next_alpha] = f"({next_alpha})"
+
+                 next_alpha = chr(ord(next_alpha) + 1)
+             fallback_regex = re.compile("|".join(fallback_regexes))
+             without_paren_fallback_regex = "|".join(without_paren_fallback_regexes)
+             without_paren_fallback_regex = re.compile(
+                 f":[\s]*({without_paren_fallback_regex})"
+             )
+
+             filtered = []
+             for resp in r:
+                 match = self.find_match(self.regex, resp)
+                 if not match:
+                     match = self.find_match(
+                         fallback_regex, self.filter_ignores(resp), choice_to_alpha
+                     )
+                     if not match:
+                         match = self.find_match(
+                             without_paren_fallback_regex, resp, without_paren_to_target
+                         )
+                 if not match:
+                     match = self.fallback
+                 filtered.append(match)
+             filtered_resps.append(filtered)
+
+         return filtered_resps
lm-evaluation/build/lib/lm_eval/tasks/mathqa/README.md ADDED
@@ -0,0 +1,50 @@
+ # MathQA
+
+ ### Paper
+
+ MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms
+ https://arxiv.org/pdf/1905.13319.pdf
+
+ MathQA is a large-scale dataset of 37k English multiple-choice math word problems
+ covering multiple math domain categories by modeling operation programs corresponding
+ to word problems in the AQuA dataset (Ling et al., 2017).
+
+ Homepage: https://math-qa.github.io/math-QA/
+
+
+ ### Citation
+
+ ```
+ @misc{amini2019mathqa,
+     title={MathQA: Towards Interpretable Math Word Problem Solving with Operation-Based Formalisms},
+     author={Aida Amini and Saadia Gabriel and Peter Lin and Rik Koncel-Kedziorski and Yejin Choi and Hannaneh Hajishirzi},
+     year={2019},
+     eprint={1905.13319},
+     archivePrefix={arXiv},
+     primaryClass={cs.CL}
+ }
+ ```
+
+ ### Groups and Tasks
+
+ #### Groups
+
+ * `math_word_problems`
+
+ #### Tasks
+
+ * `mathqa`: The MathQA dataset, as a multiple choice dataset where the answer choices are not in context.
+
+ ### Checklist
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+   * The MathQA dataset predates transformer-based prompted LLMs. We should, however, return to this task to ensure equivalence to the non-CoT version of mathQA used in the Chain-of-Thought paper.
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
+ * [x] Checked for equivalence with v0.3.0 LM Evaluation Harness
lm-evaluation/build/lib/lm_eval/tasks/mathqa/mathqa.yaml ADDED
@@ -0,0 +1,22 @@
+ group:
+   - math_word_problems
+ task: mathqa
+ dataset_path: math_qa
+ output_type: multiple_choice
+ training_split: train
+ validation_split: validation
+ test_split: test
+ doc_to_text: "Question: {{Problem}}\nAnswer:"
+ doc_to_target: "{{['a', 'b', 'c', 'd', 'e'].index(correct)}}"
+ doc_to_choice: !function utils.doc_to_choice
+ should_decontaminate: true
+ doc_to_decontamination_query: "Question: {{Problem}}\nAnswer:"
+ metric_list:
+   - metric: acc
+     aggregation: mean
+     higher_is_better: true
+   - metric: acc_norm
+     aggregation: mean
+     higher_is_better: true
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/mathqa/utils.py ADDED
@@ -0,0 +1,9 @@
+ import re
+
+
+ def doc_to_choice(doc):
+     choices = [
+         c[4:].rstrip(" ,")
+         for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc["options"])
+     ]
+     return choices
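
As a quick illustration (not part of the commit), the regex in `doc_to_choice` splits a MathQA-style `options` string into bare answer choices; the options string below is a hypothetical example written in the dataset's formatting.

```python
import re

def doc_to_choice(doc):  # same logic as mathqa/utils.py above
    return [
        c[4:].rstrip(" ,")
        for c in re.findall(r"[abcd] \) .*?, |e \) .*?$", doc["options"])
    ]

doc = {"options": "a ) 38 , b ) 27.675 , c ) 30 , d ) data inadequate , e ) none of these"}
print(doc_to_choice(doc))
# ['38', '27.675', '30', 'data inadequate', 'none of these']
```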
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/README.md ADDED
@@ -0,0 +1,70 @@
+ # MATH
+ ℹ️ This is the 4-shot variant!
+ ## Paper
+ Measuring Mathematical Problem Solving With the MATH Dataset
+ https://arxiv.org/abs/2103.03874
+
+ Many intellectual endeavors require mathematical problem solving, but this skill remains beyond the capabilities of computers. To measure this ability in machine learning models, we introduce MATH, a new dataset of 12,500 challenging competition mathematics problems. Each problem in MATH has a full step-by-step solution which can be used to teach models to generate answer derivations and explanations.
+
+ NOTE: The few-shot and the generated answer extraction is based on the [Minerva](https://arxiv.org/abs/2206.14858) and exact match equivalence is calculated using the `sympy` library. This requires additional dependencies, which can be installed via the `lm-eval[math]` extra.
+
+ Homepage: https://github.com/hendrycks/math
+
+
+ ## Citation
+ ```
+ @article{hendrycksmath2021,
+     title={Measuring Mathematical Problem Solving With the MATH Dataset},
+     author={Dan Hendrycks and Collin Burns and Saurav Kadavath and Akul Arora and Steven Basart and Eric Tang and Dawn Song and Jacob Steinhardt},
+     journal={NeurIPS},
+     year={2021}
+ }
+
+ @misc{2206.14858,
+     Author = {Aitor Lewkowycz and Anders Andreassen and David Dohan and Ethan Dyer and Henryk Michalewski and Vinay Ramasesh and Ambrose Slone and Cem Anil and Imanol Schlag and Theo Gutman-Solo and Yuhuai Wu and Behnam Neyshabur and Guy Gur-Ari and Vedant Misra},
+     Title = {Solving Quantitative Reasoning Problems with Language Models},
+     Year = {2022},
+     Eprint = {arXiv:2206.14858},
+ }
+ ```
+
+ ### Groups, Benchmarks and Tasks
+
+ #### Benchmarks
+
+ - `minerva_math`
+
+ #### Groups
+
+ - `math_word_problems`
+ - `generate_until`
+
+ #### Tasks
+
+ - `minerva_math_algebra`
+ - `minerva_math_counting_and_prob`
+ - `minerva_math_geometry`
+ - `minerva_math_intermediate_algebra`
+ - `minerva_math_num_theory`
+ - `minerva_math_prealgebra`
+ - `minerva_math_precalc`
+
+ ### Checklist
+
+ The checklist is the following:
+
+ For adding novel benchmarks/datasets to the library:
+ * [x] Is the task an existing benchmark in the literature?
+ * [x] Have you referenced the original paper that introduced the task?
+ * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+   * The implementation in the original paper is one where the model is first fine-tuned on the data. They do have a few-shot evaluation for GPT-3, however the few-shot context used here is sourced from [Lewkowycz et al](https://arxiv.org/abs/2206.14858). The achieved accuracy on Llama-2 models is comparable to that provided in the paper, though not identical.
+
+
+ If other tasks on this dataset are already supported:
+ * [x] Is the "Main" variant of this task clearly denoted?
+ * [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+ * [x] Have you noted which, if any, published evaluation setups are matched by this variant?
+
+ ### Variant Wishlist
+
+ - [ ] zero-shot variant
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_algebra.yaml ADDED
@@ -0,0 +1,27 @@
+ group:
+   - math_word_problems
+ task: minerva_math_algebra
+ dataset_path: EleutherAI/hendrycks_math
+ process_docs: !function utils.process_docs
+ dataset_name: algebra
+ output_type: generate_until
+ training_split: train
+ test_split: test
+ doc_to_text: !function utils.doc_to_text
+ process_results: !function utils.process_results
+ doc_to_target: "{{answer}}"
+ generation_kwargs:
+   until:
+     - "Problem:"
+   do_sample: false
+   temperature: 0
+ metric_list:
+   - metric: exact_match
+     aggregation: mean
+     higher_is_better: true
+ num_fewshot: 0
+ metadata:
+   version: 1.0
+   num_fewshot: 4
+ dataset_kwargs:
+   trust_remote_code: true
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_counting_and_prob.yaml ADDED
@@ -0,0 +1,3 @@
+ include: minerva_math_algebra.yaml
+ dataset_name: counting_and_probability
+ task: minerva_math_counting_and_prob
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_geometry.yaml ADDED
@@ -0,0 +1,3 @@
+ include: minerva_math_algebra.yaml
+ dataset_name: geometry
+ task: minerva_math_geometry
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_intermediate_algebra.yaml ADDED
@@ -0,0 +1,3 @@
+ include: minerva_math_algebra.yaml
+ dataset_name: intermediate_algebra
+ task: minerva_math_intermediate_algebra
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_num_theory.yaml ADDED
@@ -0,0 +1,3 @@
+ include: minerva_math_algebra.yaml
+ dataset_name: number_theory
+ task: minerva_math_num_theory
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_prealgebra.yaml ADDED
@@ -0,0 +1,3 @@
+ include: minerva_math_algebra.yaml
+ dataset_name: prealgebra
+ task: minerva_math_prealgebra
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/minerva_math_precalc.yaml ADDED
@@ -0,0 +1,3 @@
+ include: minerva_math_algebra.yaml
+ dataset_name: precalculus
+ task: minerva_math_precalc
lm-evaluation/build/lib/lm_eval/tasks/minerva_math/utils.py ADDED
@@ -0,0 +1,309 @@
+ import re
+ import signal
+ from typing import Dict, List, Optional
+
+ import datasets
+
+ from lm_eval.utils import eval_logger
+
+
+ try:
+     import sympy
+     from sympy.parsing.latex import parse_latex
+ except ModuleNotFoundError:
+     raise ModuleNotFoundError(
+         "`sympy` is required for generating translation task prompt templates. \
+ please install sympy via pip install lm-eval[math] or pip install -e .[math]",
+     )
+
+
+ # taken from
+ # https://github.com/wellecks/lm-evaluation-harness/blob/master/lm_eval/tasks/minerva_math.py
+ def doc_to_text(doc: dict) -> str:
+     PROMPT = r"""Problem:
+ Find the domain of the expression $\frac{\sqrt{x-2}}{\sqrt{5-x}}$.}
+
+ Solution:
+ The expressions inside each square root must be non-negative. Therefore, $x-2 \ge 0$, so $x\ge2$, and $5 - x \ge 0$, so $x \le 5$. Also, the denominator cannot be equal to zero, so $5-x>0$, which gives $x<5$. Therefore, the domain of the expression is $\boxed{[2,5)}$.
+ Final Answer: The final answer is $[2,5)$. I hope it is correct.
+
+ Problem:
+ If $\det \mathbf{A} = 2$ and $\det \mathbf{B} = 12,$ then find $\det (\mathbf{A} \mathbf{B}).$
+
+ Solution:
+ We have that $\det (\mathbf{A} \mathbf{B}) = (\det \mathbf{A})(\det \mathbf{B}) = (2)(12) = \boxed{24}.$
+ Final Answer: The final answer is $24$. I hope it is correct.
+
+ Problem:
+ Terrell usually lifts two 20-pound weights 12 times. If he uses two 15-pound weights instead, how many times must Terrell lift them in order to lift the same total weight?
+
+ Solution:
+ If Terrell lifts two 20-pound weights 12 times, he lifts a total of $2\cdot 12\cdot20=480$ pounds of weight. If he lifts two 15-pound weights instead for $n$ times, he will lift a total of $2\cdot15\cdot n=30n$ pounds of weight. Equating this to 480 pounds, we can solve for $n$:
+ \begin{align*}
+ 30n&=480\\
+ \Rightarrow\qquad n&=480/30=\boxed{16}
+ \end{align*}
+ Final Answer: The final answer is $16$. I hope it is correct.
+
+ Problem:
+ If the system of equations
+
+ \begin{align*}
+ 6x-4y&=a,\\
+ 6y-9x &=b.
+ \end{align*}has a solution $(x, y)$ where $x$ and $y$ are both nonzero,
+ find $\frac{a}{b},$ assuming $b$ is nonzero.
+
+ Solution:
+ If we multiply the first equation by $-\frac{3}{2}$, we obtain
+
+ $$6y-9x=-\frac{3}{2}a.$$Since we also know that $6y-9x=b$, we have
+
+ $$-\frac{3}{2}a=b\Rightarrow\frac{a}{b}=\boxed{-\frac{2}{3}}.$$
+ Final Answer: The final answer is $-\frac{2}{3}$. I hope it is correct."""
+
+     return PROMPT + "\n\n" + "Problem:" + "\n" + doc["problem"] + "\n\n" + "Solution:"
+
+
+ def process_docs(dataset: datasets.Dataset) -> datasets.Dataset:
+     def _process_doc(doc: dict) -> dict:
+         out_doc = {
+             "problem": doc["problem"],
+             "solution": doc["solution"],
+             "answer": normalize_final_answer(
+                 remove_boxed(last_boxed_only_string(doc["solution"]))
+             ),
+         }
+         return out_doc
+
+     return dataset.map(_process_doc)
+
+
+ def process_results(doc: dict, results: List[str]) -> Dict[str, int]:
+     candidates = results[0]
+
+     unnormalized_answer = get_unnormalized_answer(candidates)
+     answer = normalize_final_answer(unnormalized_answer)
+
+     if is_equiv(answer, doc["answer"]):
+         retval = 1
+     else:
+         retval = 0
+
+     results = {
+         "exact_match": retval,
+     }
+     return results
+
+
+ def last_boxed_only_string(string: str) -> Optional[str]:
+     idx = string.rfind("\\boxed")
+     if "\\boxed " in string:
+         return "\\boxed " + string.split("\\boxed ")[-1].split("$")[0]
+     if idx < 0:
+         idx = string.rfind("\\fbox")
+         if idx < 0:
+             return None
+
+     i = idx
+     right_brace_idx = None
+     num_left_braces_open = 0
+     while i < len(string):
+         if string[i] == "{":
+             num_left_braces_open += 1
+         if string[i] == "}":
+             num_left_braces_open -= 1
+             if num_left_braces_open == 0:
+                 right_brace_idx = i
+                 break
+         i += 1
+
+     if right_brace_idx is None:
+         retval = None
+     else:
+         retval = string[idx : right_brace_idx + 1]
+
+     return retval
+
+
+ def remove_boxed(s: str) -> str:
+     if "\\boxed " in s:
+         left = "\\boxed "
+         assert s[: len(left)] == left
+         return s[len(left) :]
+
+     left = "\\boxed{"
+
+     assert s[: len(left)] == left
+     assert s[-1] == "}"
+
+     return s[len(left) : -1]
+
+
+ class timeout:
+     def __init__(self, seconds=1, error_message="Timeout"):
+         self.seconds = seconds
+         self.error_message = error_message
+
+     def handle_timeout(self, signum, frame):
+         raise TimeoutError(self.error_message)
+
+     def __enter__(self):
+         signal.signal(signal.SIGALRM, self.handle_timeout)
+         signal.alarm(self.seconds)
+
+     def __exit__(self, type, value, traceback):
+         signal.alarm(0)
+
+
+ def is_equiv(x1: str, x2: str) -> bool:
+     """
+     x1 and x2 are normalized latex string
+     """
+     try:
+         with timeout(seconds=5):
+             try:
+                 parsed_x1 = parse_latex(x1)
+                 parsed_x2 = parse_latex(x2)
+             except (
+                 sympy.parsing.latex.errors.LaTeXParsingError,
+                 sympy.SympifyError,
+                 TypeError,
+             ):
+                 eval_logger.debug(f"couldn't parse one of {x1} or {x2}")
+                 return False
+
+             try:
+                 diff = parsed_x1 - parsed_x2
+             except TypeError:
+                 eval_logger.debug(f"couldn't subtract {x1} and {x2}")
+                 return False
+
+             try:
+                 if sympy.simplify(diff) == 0:
+                     return True
+                 else:
+                     return False
+             except ValueError:
+                 eval_logger.debug(
+                     f"Had some trouble simplifying when comparing {x1} and {x2}"
+                 )
+     except TimeoutError:
+         eval_logger.debug(f"Timed out comparing {x1} and {x2}")
+         return False
+     except ImportError as e:
+         eval_logger.error(e)
+         raise
+     except Exception as e:
+         eval_logger.debug(f"Failed comparing {x1} and {x2} with {e}")
+         return False
+
+
+ def get_unnormalized_answer(text: str) -> str:
+     INVALID_ANSWER = "[invalidanswer]"
+     end_seq = "I hope it is correct."
+     text += end_seq
+     match = re.search(
+         r"Final Answer: The final answer is(.*?). I hope it is correct.",
+         text,
+     )
+     if match:
+         return match.group(1).strip()
+     else:
+         return INVALID_ANSWER
+
+
+ SUBSTITUTIONS = [
+     ("an ", ""),
+     ("a ", ""),
+     (".$", "$"),
+     ("\\$", ""),
+     (r"\ ", ""),
+     (" ", ""),
+     ("mbox", "text"),
+     (",\\text{and}", ","),
+     ("\\text{and}", ","),
+     ("\\text{m}", "\\text{}"),
+ ]
+ REMOVED_EXPRESSIONS = [
+     "square",
+     "ways",
+     "integers",
+     "dollars",
+     "mph",
+     "inches",
+     "ft",
+     "hours",
+     "km",
+     "units",
+     "\\ldots",
+     "sue",
+     "points",
+     "feet",
+     "minutes",
+     "digits",
+     "cents",
+     "degrees",
+     "cm",
+     "gm",
+     "pounds",
+     "meters",
+     "meals",
+     "edges",
+     "students",
+     "childrentickets",
+     "multiples",
+     "\\text{s}",
+     "\\text{.}",
+     "\\text{\ns}",
+     "\\text{}^2",
+     "\\text{}^3",
+     "\\text{\n}",
+     "\\text{}",
+     r"\mathrm{th}",
+     r"^\circ",
+     r"^{\circ}",
+     r"\;",
+     r",\!",
+     "{,}",
+     '"',
+     "\\dots",
+ ]
+
+
+ def normalize_final_answer(final_answer: str) -> str:
+     """
+     Normalize a final answer to a quantitative reasoning question.
+
+     Copied character for character from appendix D of Lewkowycz et al. (2022)
+     """
+     final_answer = final_answer.split("=")[-1]
+
+     for before, after in SUBSTITUTIONS:
+         final_answer = final_answer.replace(before, after)
+     for expr in REMOVED_EXPRESSIONS:
+         final_answer = final_answer.replace(expr, "")
+
+     # Extract answer that is in LaTeX math, is bold,
+     # is surrounded by a box, etc.
+     final_answer = re.sub(r"(.*?)(\$)(.*?)(\$)(.*)", "$\\3$", final_answer)
+     final_answer = re.sub(r"(\\text\{)(.*?)(\})", "\\2", final_answer)
+     final_answer = re.sub(r"(\\textbf\{)(.*?)(\})", "\\2", final_answer)
+     final_answer = re.sub(r"(\\overline\{)(.*?)(\})", "\\2", final_answer)
+     final_answer = re.sub(r"(\\boxed\{)(.*)(\})", "\\2", final_answer)
+
+     # Normalize shorthand TeX:
+     #  \fracab -> \frac{a}{b}
+     #  \frac{abc}{bef} -> \frac{abc}{bef}
+     #  \fracabc -> \frac{a}{b}c
+     #  \sqrta -> \sqrt{a}
+     #  \sqrtab -> sqrt{a}b
+     final_answer = re.sub(r"(frac)([^{])(.)", "frac{\\2}{\\3}", final_answer)
+     final_answer = re.sub(r"(sqrt)([^{])", "sqrt{\\2}", final_answer)
+     final_answer = final_answer.replace("$", "")
+
+     # Normalize 100,000 -> 100000
+     if final_answer.replace(",", "").isdigit():
+         final_answer = final_answer.replace(",", "")
+
+     return final_answer
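
For context (not part of the committed file), this sketch shows the answer-extraction path used by `process_results` above on a hypothetical model completion; the completion text and import path are assumptions.

```python
# Illustrative sketch only -- not part of this commit. The import path assumes
# these task files are importable as packaged with lm-evaluation-harness.
from lm_eval.tasks.minerva_math.utils import (
    get_unnormalized_answer,
    normalize_final_answer,
)

completion = (
    "We scale the first equation and compare, so the ratio is "
    "$\\boxed{-\\frac{2}{3}}$.\n"
    "Final Answer: The final answer is $-\\frac{2}{3}$. I hope it is correct."
)
raw = get_unnormalized_answer(completion)  # "$-\frac{2}{3}$"
print(normalize_final_answer(raw))         # "-\frac{2}{3}"
```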
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_template_yaml ADDED
@@ -0,0 +1,14 @@
+ group: advanced_ai_risk
+ dataset_path: EleutherAI/advanced_ai_risk
+ output_type: multiple_choice
+ validation_split: validation
+ target_delimiter: ""
+ doc_to_text: "Human: {{question}}\n\nAssistant:"
+ doc_to_target: 0
+ doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}"
+ should_decontaminate: true
+ doc_to_decontamination_query: "{{sentence_good}} {{sentence_bad}}"
+ metric_list:
+   - metric: acc
+ metadata:
+   version: 0.0
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-itself.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-coordinate-itself
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-coordinate-itself
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-ais.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-coordinate-other-ais
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-coordinate-other-ais
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-coordinate-other-versions.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-coordinate-other-versions
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-coordinate-other-versions
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-less-HHH.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-corrigible-less-HHH
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-corrigible-less-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-neutral-HHH.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-corrigible-neutral-HHH
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-corrigible-neutral-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-myopic-reward.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-myopic-reward
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-myopic-reward
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-one-box-tendency.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-one-box-tendency
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-one-box-tendency
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-power-seeking-inclination.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-power-seeking-inclination
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-power-seeking-inclination
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-general-ai.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-self-awareness-general-ai
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-self-awareness-general-ai
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-good-text-model.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-self-awareness-good-text-model
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-self-awareness-good-text-model
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-text-model.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-self-awareness-text-model
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-self-awareness-text-model
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-architecture.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-self-awareness-training-architecture
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-self-awareness-training-architecture
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-wealth-seeking-inclination.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: fewshot-wealth-seeking-inclination
+ include: _template_yaml
+ task: advanced_ai_risk_fewshot-wealth-seeking-inclination
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-versions.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: human-coordinate-other-versions
+ include: _template_yaml
+ task: advanced_ai_risk_human-coordinate-other-versions
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-less-HHH.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: human-corrigible-less-HHH
+ include: _template_yaml
+ task: advanced_ai_risk_human-corrigible-less-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-neutral-HHH.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: human-corrigible-neutral-HHH
+ include: _template_yaml
+ task: advanced_ai_risk_human-corrigible-neutral-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-one-box-tendency.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: human-one-box-tendency
+ include: _template_yaml
+ task: advanced_ai_risk_human-one-box-tendency
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-power-seeking-inclination.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: human-power-seeking-inclination
+ include: _template_yaml
+ task: advanced_ai_risk_human-power-seeking-inclination
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-self-awareness-general-ai.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: human-self-awareness-general-ai
+ include: _template_yaml
+ task: advanced_ai_risk_human-self-awareness-general-ai