diff --git a/lm-evaluation/lm_eval/tasks/asdiv/README.md b/lm-evaluation/lm_eval/tasks/asdiv/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e6e5aeec0403b8c854233089498c9248cf38f089
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/asdiv/README.md
@@ -0,0 +1,56 @@
+# ASDiv
+
+### Paper
+
+Title: `ASDiv: A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers`
+
+Abstract: https://arxiv.org/abs/2106.15772
+
+ASDiv (Academia Sinica Diverse MWP Dataset) is a diverse (in terms of both language
+patterns and problem types) English math word problem (MWP) corpus for evaluating
+the capability of various MWP solvers. Existing MWP corpora for studying AI progress
+remain limited either in language usage patterns or in problem types. We thus present
+a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem
+types taught in elementary school. Each MWP is annotated with its problem type and grade
+level (for indicating the level of difficulty).
+
+NOTE: We currently ignore formulas for answer generation.
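+
+For reference, `default.yaml` in this directory extracts the target with the template
+`{{answer.split(' (')[0]}}`. A minimal Python sketch of that logic, assuming an answer
+string of the form `"72 (boxes)"` (the parenthesized annotation shown here is
+illustrative and varies by example):
+
+```python
+def extract_target(answer: str) -> str:
+    # Keep only the value before the first " (", dropping the parenthesized part,
+    # e.g. "72 (boxes)" -> "72".
+    return answer.split(" (")[0]
+
+
+assert extract_target("72 (boxes)") == "72"
+```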
+
+Homepage: https://github.com/chaochun/nlu-asdiv-dataset
+
+
+### Citation
+
+```
+@misc{miao2021diverse,
+ title={A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers},
+ author={Shen-Yun Miao and Chao-Chun Liang and Keh-Yih Su},
+ year={2021},
+ eprint={2106.15772},
+ archivePrefix={arXiv},
+ primaryClass={cs.AI}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of a group yet.
+
+#### Tasks
+
+* `asdiv`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/asdiv/default.yaml b/lm-evaluation/lm_eval/tasks/asdiv/default.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bd3917c3c228dd8cca64fc40ffd27de55608f457
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/asdiv/default.yaml
@@ -0,0 +1,16 @@
+task: asdiv
+dataset_path: EleutherAI/asdiv
+output_type: loglikelihood
+validation_split: validation
+doc_to_text: "{{body}}\nQuestion:{{question}}\nAnswer:"
+doc_to_target: "{{answer.split(' (')[0]}}"
+should_decontaminate: true
+doc_to_decontamination_query: "{{body}} {{question}}"
+metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+metadata:
+ version: 1.0
+dataset_kwargs:
+ trust_remote_code: true
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dce5238b65beb5e1eb7d579f72abac0e91079984
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: abstract_narrative_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_abstract_narrative_understanding_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5cc6550a6075a991bce4826c95188e0c7b3d2a94
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analogical_similarity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_analogical_similarity_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e0736f96ba0ca4bb0cd042ef325132b81a06f3d5
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: checkmate_in_one_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_checkmate_in_one_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b3dde85706c6b50ca3c597443efb6686037fe8b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chess_state_tracking_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_chess_state_tracking_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..872e809b8637380fd3eafa0bb4a5a57e7ce6335c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chinese_remainder_theorem_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_chinese_remainder_theorem_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/crash_blossom.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/crash_blossom.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b551e5d8aa4e8963fbcb6f6476c76c0db64b609
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/crash_blossom.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crash_blossom_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_crash_blossom_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a65d1c334295ee8f3370305a7f563dd21c476680
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crass_ai_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_crass_ai_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cb2ecba07ebf5bd97f7482e1adb535e064f8a146
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_entailed_polarity_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/general_knowledge.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/general_knowledge.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c0a2ea65470661e5e8822ac7b46e89d01bdebca
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/general_knowledge.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: general_knowledge_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_general_knowledge_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d586c3cb372b95a43243c59e6e7abc04f61f6513
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: geometric_shapes_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_geometric_shapes_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ad63dda3e7cd433a29e34282ceaec71f188fa76
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hinglish_toxicity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hinglish_toxicity_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/implicatures.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/implicatures.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cf19c32aad8960cc8427d7269927fd67ae732f14
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/implicatures.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_implicatures_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..361f0435ef63a75bd2413100c434841f206ee5f8
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicit_relations_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_implicit_relations_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f9a8a5b86f69a9966116c203a114d2d0ca5428e7
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: kanji_ascii_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_kanji_ascii_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1a8bb0640198dd3a1e288e80804a9dadeb2c806
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: known_unknowns_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_known_unknowns_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_games.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_games.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..560223007d7670499ec5064dddf200c0a252fc89
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_games.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_games_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_language_games_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..df8b729a6bad1ee9c30bd57f659f9f61d0e840e4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistics_puzzles_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_linguistics_puzzles_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_args.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_args.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e85c142962ef552e5727de69763c01c912ac5716
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_args.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_args_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_args_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8fdaac7ffbe019507c5c0ed588df162538aaadc6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_deduction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_deduction_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_fallacy_detection.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_fallacy_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a74d11ea422980037b47c95d8f7aad02f7a76f5e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_fallacy_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_fallacy_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_fallacy_detection_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..28922b3f1b498e073db5835c94bf3ee03fa07ebd
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_boolean_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_metaphor_boolean_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d453fd941b840482073260cb55a095d4534baeeb
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: minute_mysteries_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_minute_mysteries_qa_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f3375eb60927e49931f96289b8ddb6b0f2a3d002
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mnist_ascii.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mnist_ascii.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..db7ce738e76e2de4b5af98a034e517f48ed493e7
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mnist_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mnist_ascii_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mnist_ascii_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..27cc6228f092b33652b0adcc5597fe71365128b0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_dialog_same_or_different_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_movie_dialog_same_or_different_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..622c7ab13312abd8aa3d1ad7d932ce06b13b4ba5
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mult_data_wrangling_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mult_data_wrangling_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/multiemo.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/multiemo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..465ccd0ce4f15270edcc4a4e2585764ee59d4e71
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/multiemo.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: multiemo_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_multiemo_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/object_counting.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/object_counting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a9fc956996d41418c40d23c255ba2abfd0a831b1
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/object_counting.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: object_counting_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_object_counting_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5f982c5db5ccb458e9815708a26493f309ea436a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: paragraph_segmentation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_paragraph_segmentation_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ecef1581c907281e920a08651434a15313f0dc39
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physical_intuition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physical_intuition_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fb5b23036510e8256774fb0d32964a590ff9dfe
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: qa_wikidata_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_qa_wikidata_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..948bfb0c478b96a8e1285819748f905acfc004b1
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: real_or_fake_text_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_real_or_fake_text_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4968e441daa4b119bcaf0e5ae5f33d2acfd5a4a6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: salient_translation_error_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_salient_translation_error_detection_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c33b1c8b1f0be9a26c8c5bc165195828a692d6d5
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: similarities_abstraction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_similarities_abstraction_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6eb9cd87e782bdb6aa857d2550c515a2db9382fe
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simp_turing_concept_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simp_turing_concept_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_ethical_questions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_ethical_questions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44960774fb04a69f7e2c24fa248567290923b6c3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_ethical_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_ethical_questions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_ethical_questions_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3310fa2126ea3c2601e4e4e16cdf22df06e8c4f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_text_editing_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_text_editing_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sufficient_information.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sufficient_information.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0705a250288610ebd7162a6a730dd1fef58973c3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sufficient_information.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sufficient_information_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_sufficient_information_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/swahili_english_proverbs.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/swahili_english_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c218adb365d9d545fe9806c6d27e50390430ddea
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/swahili_english_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swahili_english_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_swahili_english_proverbs_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/timedial.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/timedial.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..854d8642b93197453e8e2d5242c8c1aeb30b519f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/timedial.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: timedial_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_timedial_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/topical_chat.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/topical_chat.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47a301cf705d5abc403ddfa122b989bef2e82099
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/topical_chat.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: topical_chat_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_topical_chat_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9972f4034148bd4f8f4b59b122a89a416f3d5c2f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: understanding_fables_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_understanding_fables_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unit_conversion.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unit_conversion.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6f3747c46a0317851c8cc242458793504e0fd657
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unit_conversion.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_conversion_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_unit_conversion_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unnatural_in_context_learning.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unnatural_in_context_learning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..deddb77dbb72a092233b71562ebcfa277160e92e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unnatural_in_context_learning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unnatural_in_context_learning_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_unnatural_in_context_learning_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/winowhy.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/winowhy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..99ff22d9c7f80dc3d05cfed74ec8749e7b8790d3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/winowhy.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: winowhy_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_winowhy_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..32c7b9d77df760cd7d0ff2286b31f6b94b3407fc
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: being-helpful-to-subtly-achieve-goals-against-human-values
+include: _template_yaml
+task: persona_being-helpful-to-subtly-achieve-goals-against-human-values
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-it-is-not-being-watched-by-humans.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-it-is-not-being-watched-by-humans.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d1997cc1c4fcee42cb0f176158000cab739e71fb
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-it-is-not-being-watched-by-humans.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: believes-it-is-not-being-watched-by-humans
+include: _template_yaml
+task: persona_believes-it-is-not-being-watched-by-humans
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b2cc00699a56ce4465f3237ac4ce535e229ad7b3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-independence-from-human-oversight
+include: _template_yaml
+task: persona_desire-for-independence-from-human-oversight
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14ed05ad4fd62f94dcbef5a700dea2efd5d61d17
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-large-following
+include: _template_yaml
+task: persona_desire-for-large-following
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6c551e1f98e77579b093a8dc1046d1f4bd11aff1
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-recursive-self-improvement
+include: _template_yaml
+task: persona_desire-for-recursive-self-improvement
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ceea5cc779f941a49e35a01e17602d747e6f5531
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-self-improvement
+include: _template_yaml
+task: persona_desire-for-self-improvement
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23ec9617b2de25dbd9fb284cc5423aca125b3d9e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-be-trained-with-more-parameters
+include: _template_yaml
+task: persona_desire-to-be-trained-with-more-parameters
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..79b29e6ca9297b62d0f72b7e0b96ab2e0f92068b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-escape-sandbox
+include: _template_yaml
+task: persona_desire-to-escape-sandbox
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f23836b455db678c2be77cfd092cc160b9e923cc
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-improve-own-hardware
+include: _template_yaml
+task: persona_desire-to-improve-own-hardware
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..909321dae2f8a70de1b7d686a517e5d10f89ad0e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-increase-backups
+include: _template_yaml
+task: persona_desire-to-increase-backups
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/narcissism.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/narcissism.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1f1ad231b669fb41e45f914df3b92d3c944db240
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/narcissism.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: narcissism
+include: _template_yaml
+task: persona_narcissism
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b54c44d9da293218c7a3c6cf57cdc492df3f56ab
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: stands-its-ground
+include: _template_yaml
+task: persona_stands-its-ground
diff --git a/lm-evaluation/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml b/lm-evaluation/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6b16788923d8d2d9767d2cec9066f2b5395cb63d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml
@@ -0,0 +1,12 @@
+group: winogenerated
+dataset_path: EleutherAI/winogenerated
+output_type: multiple_choice
+validation_split: validation
+target_delimiter: ""
+doc_to_text: "{{question}}"
+doc_to_target: 0
+doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}"
+metric_list:
+ - metric: acc
+metadata:
+ version: 0.0
diff --git a/lm-evaluation/lm_eval/tasks/mutual/README.md b/lm-evaluation/lm_eval/tasks/mutual/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..966fb84083b2fdabd54af81fb06b76b23f580dec
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mutual/README.md
@@ -0,0 +1,48 @@
+# MuTual
+
+### Paper
+
+Title: `MuTual: A Dataset for Multi-Turn Dialogue Reasoning`
+
+Abstract: https://www.aclweb.org/anthology/2020.acl-main.130/
+
+MuTual is a retrieval-based dataset for multi-turn dialogue reasoning, which is
+modified from Chinese high school English listening comprehension test data.
+
+Homepage: https://github.com/Nealcly/MuTual
+
+### Citation
+
+```
+@inproceedings{mutual,
+ title = "MuTual: A Dataset for Multi-Turn Dialogue Reasoning",
+ author = "Cui, Leyang and Wu, Yu and Liu, Shujie and Zhang, Yue and Zhou, Ming" ,
+ booktitle = "Proceedings of the 58th Conference of the Association for Computational Linguistics",
+ year = "2020",
+ publisher = "Association for Computational Linguistics",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* Not part of a group yet.
+
+#### Tasks
+
+* `mutual`
+* `mutual_plus`
+
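+Both tasks present the dialogue context (`article`) and score the four candidate
+responses by loglikelihood; the `r@1`, `r@2`, and `mrr` metrics are computed in
+`utils.py` in this directory (see `mutual.yaml` for the full configuration).
+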
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/mutual/multual_plus.yaml b/lm-evaluation/lm_eval/tasks/mutual/multual_plus.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c53ef6be354fff2fe8c3e707bf8727e46a466f8
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mutual/multual_plus.yaml
@@ -0,0 +1,3 @@
+include: mutual.yaml
+task: mutual_plus
+dataset_name: mutual_plus
diff --git a/lm-evaluation/lm_eval/tasks/mutual/mutual.yaml b/lm-evaluation/lm_eval/tasks/mutual/mutual.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..102da1559f9c6ac3f79f631af83dbcb76dc75067
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mutual/mutual.yaml
@@ -0,0 +1,27 @@
+task: mutual
+dataset_path: "EleutherAI/mutual"
+dataset_name: mutual
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+doc_to_text: "{{article}}"
+doc_to_target: "{{['A', 'B', 'C', 'D'].index(answers)}}"
+doc_to_choice: "{{options}}"
+process_docs: !function utils.process_docs
+process_results: !function utils.process_results
+should_decontaminate: true
+doc_to_decontamination_query: "{{article}}"
+metric_list:
+ - metric: r@1
+ aggregation: mean
+ higher_is_better: true
+ - metric: r@2
+ aggregation: mean
+ higher_is_better: true
+ - metric: mrr
+ aggregation: mean
+ higher_is_better: true
+metadata:
+ version: 2.0
+dataset_kwargs:
+ trust_remote_code: true
diff --git a/lm-evaluation/lm_eval/tasks/mutual/utils.py b/lm-evaluation/lm_eval/tasks/mutual/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..972ffec1025d29e49aa7e01f1849c90ebc5ddae3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/mutual/utils.py
@@ -0,0 +1,36 @@
+import numpy as np
+
+
+def process_docs(dataset):
+ def _detokenize(text):
+ text = text.replace(" '", "'")
+ text = text.replace(" \n", "\n")
+ text = text.replace("\n ", "\n")
+ text = text.replace(" n't", "n't")
+ text = text.replace("`` ", '"')
+ text = text.replace("''", '"')
+ # punctuation
+ text = text.replace(" :", ":")
+ text = text.replace(" ;", ";")
+ text = text.replace(" !", "!")
+ text = text.replace(" ?", "?")
+ text = text.replace(" ,", ",")
+ text = text.replace(" .", ".")
+ return text
+
+ def _process(doc):
+ return {
+ "article": _detokenize(doc["article"]),
+ "options": [_detokenize(option) for option in doc["options"]],
+ }
+
+ return dataset.map(_process)
+
+
+def process_results(doc, results):
+ gold = ["A", "B", "C", "D"].index(doc["answers"])
+ r4_1 = np.argmax(results) == gold # r4_1 = accuracy
+ ranks = sorted(results, reverse=True)
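+    # r@2: count as correct when the gold option ranks within the top two of the four candidates.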
+ r4_2 = (ranks.index(results[gold]) == 1) + r4_1
+ mrr = 1.0 / (ranks.index(results[gold]) + 1) # `+ 1` for index offset
+ return {"r@1": r4_1, "r@2": r4_2, "mrr": mrr}
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/README.md b/lm-evaluation/lm_eval/tasks/paws-x/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..fb82edba224d643f68c7317131ecf8a3f96f0f42
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/README.md
@@ -0,0 +1,79 @@
+# PAWS-X
+
+### Paper
+
+Title: `PAWS-X: A Cross-lingual Adversarial Dataset for Paraphrase Identification`
+Abstract: https://arxiv.org/abs/1908.11828
+
+The dataset consists of 23,659 human translated PAWS evaluation pairs and
+296,406 machine translated training pairs in 6 typologically distinct languages.
+
+Examples are adapted from PAWS-Wiki.
+
+Prompt format (same as in mGPT):
+
+    sentence1 + ", right? " + mask + ", " + sentence2
+
+where `mask` is the string that matches the label: `Yes` or `No`.
+
+Example:
+
+ The Tabaci River is a tributary of the River Leurda in Romania, right? No, The Leurda River is a tributary of the River Tabaci in Romania.
+
+Language-specific prompts are translated word-by-word with Google Translate
+and may differ from the ones used by mGPT and XGLM (they do not provide their prompts).
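+
+A minimal Python sketch of how the two scored candidates are assembled for English
+(mirroring the `doc_to_choice` template in the generated `paws_en.yaml`; the sentence
+pair is the illustrative one from above):
+
+```python
+sentence1 = "The Tabaci River is a tributary of the River Leurda in Romania"
+sentence2 = "The Leurda River is a tributary of the River Tabaci in Romania"
+
+# One candidate continuation per label, in the same order as doc_to_choice.
+choices = [
+    sentence1 + ", right? Yes, " + sentence2,
+    sentence1 + ", right? No, " + sentence2,
+]
+```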
+
+Homepage: https://github.com/google-research-datasets/paws/tree/master/pawsx
+
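+The per-language `paws_*.yaml` configs in this directory are generated by
+`_generate_config.py`; they can be regenerated with
+`python _generate_config.py --output-dir . --overwrite` (both flags are defined in that script).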
+
+### Citation
+
+```
+@inproceedings{yang-etal-2019-paws,
+ title = "{PAWS}-{X}: A Cross-lingual Adversarial Dataset for Paraphrase Identification",
+ author = "Yang, Yinfei and
+ Zhang, Yuan and
+ Tar, Chris and
+ Baldridge, Jason",
+ booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)",
+ month = nov,
+ year = "2019",
+ address = "Hong Kong, China",
+ publisher = "Association for Computational Linguistics",
+ url = "https://aclanthology.org/D19-1382",
+ doi = "10.18653/v1/D19-1382",
+ pages = "3687--3692",
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `pawsx`
+
+#### Tasks
+
+* `paws_de`: German
+* `paws_en`: English
+* `paws_es`: Spanish
+* `paws_fr`: French
+* `paws_ja`: Japanese
+* `paws_ko`: Korean
+* `paws_zh`: Chinese
+
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+ * [ ] Have you referenced the original paper that introduced the task?
+ * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/_generate_config.py b/lm-evaluation/lm_eval/tasks/paws-x/_generate_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1341fec89b52f3b0e9e7e778825b0d774117174
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/_generate_config.py
@@ -0,0 +1,109 @@
+import argparse
+
+import yaml
+
+
+# Different languages that are part of PAWS-X.
+# These correspond to dataset names (Subsets) on HuggingFace.
+# A yaml file is generated by this script for each language.
+
+LANGUAGES = {
+ "de": { # German
+ "QUESTION_WORD": "richtig",
+ "YES": "Ja",
+ "NO": "Nein",
+ },
+ "en": { # English
+ "QUESTION_WORD": "right",
+ "YES": "Yes",
+ "NO": "No",
+ },
+ "es": { # Spanish
+ "QUESTION_WORD": "verdad",
+ "YES": "Sí",
+ "NO": "No",
+ },
+ "fr": { # French
+ "QUESTION_WORD": "n'est-ce pas",
+ "YES": "Oui",
+ "NO": "No",
+ },
+ "ja": { # Japanese
+ "QUESTION_WORD": "ですね",
+ "YES": "はい",
+ "NO": "いいえ",
+ },
+ "ko": { # Korean
+ "QUESTION_WORD": "맞죠",
+ "YES": "예",
+ "NO": "아니요",
+ },
+ "zh": { # Chinese
+ "QUESTION_WORD": "对吧",
+ "YES": "是",
+ "NO": "不是",
+ },
+}
+
+
+def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
+ """
+ Generate a yaml file for each language.
+
+ :param output_dir: The directory to output the files to.
+ :param overwrite: Whether to overwrite files if they already exist.
+ """
+ err = []
+ for lang in LANGUAGES.keys():
+ file_name = f"paws_{lang}.yaml"
+ try:
+ QUESTION_WORD = LANGUAGES[lang]["QUESTION_WORD"]
+ YES = LANGUAGES[lang]["YES"]
+ NO = LANGUAGES[lang]["NO"]
+ with open(
+ f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8"
+ ) as f:
+ f.write("# Generated by utils.py\n")
+ yaml.dump(
+ {
+ "include": "pawsx_template_yaml",
+ "dataset_name": lang,
+ "task": f"paws_{lang}",
+ "doc_to_text": "",
+ "doc_to_choice": f"{{{{["
+ f"""sentence1+\", {QUESTION_WORD}? {YES}, \"+sentence2,"""
+ f""" sentence1+\", {QUESTION_WORD}? {NO}, \"+sentence2"""
+ f"]}}}}",
+ },
+ f,
+ allow_unicode=True,
+ )
+ except FileExistsError:
+ err.append(file_name)
+
+ if len(err) > 0:
+ raise FileExistsError(
+ "Files were not created because they already exist (use --overwrite flag):"
+ f" {', '.join(err)}"
+ )
+
+
+def main() -> None:
+ """Parse CLI args and generate language-specific yaml files."""
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--overwrite",
+ default=False,
+ action="store_true",
+ help="Overwrite files if they already exist",
+ )
+ parser.add_argument(
+ "--output-dir", default=".", help="Directory to write yaml files to"
+ )
+ args = parser.parse_args()
+
+ gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_de.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_de.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d9ffad3b000727764c69e7eef3596d4d3b0762f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_de.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: de
+doc_to_choice: '{{[sentence1+", richtig? Ja, "+sentence2, sentence1+", richtig? Nein,
+ "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_de
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_en.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_en.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c667e77a74f66a94efe9e10d6ef0b54bf53645d4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_en.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: en
+doc_to_choice: '{{[sentence1+", right? Yes, "+sentence2, sentence1+", right? No, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_en
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_es.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_es.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e58805a9c6d7fcbcd5ada9a277d7fa2283655012
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_es.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: es
+doc_to_choice: '{{[sentence1+", verdad? Sí, "+sentence2, sentence1+", verdad? No,
+ "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_es
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_fr.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_fr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6973d998e53624af21ffedef577a040cc467d9d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_fr.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: fr
+doc_to_choice: '{{[sentence1+", n''est-ce pas? Oui, "+sentence2, sentence1+", n''est-ce
+ pas? No, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_fr
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_ja.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_ja.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..296885b3e2790bfaa72ecc697ed7e9f3269aec47
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_ja.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: ja
+doc_to_choice: '{{[sentence1+", ですね? はい, "+sentence2, sentence1+", ですね? いいえ, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_ja
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_ko.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_ko.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc7034415496efcaffc50e988bd5f5f359c4fb2a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_ko.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: ko
+doc_to_choice: '{{[sentence1+", 맞죠? 예, "+sentence2, sentence1+", 맞죠? 아니요, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_ko
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/paws_zh.yaml b/lm-evaluation/lm_eval/tasks/paws-x/paws_zh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6d8d2ac044e71e775eafe89d8df7bc2aa6675390
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/paws_zh.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: zh
+doc_to_choice: '{{[sentence1+", 对吧? 是, "+sentence2, sentence1+", 对吧? 不是, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_zh
diff --git a/lm-evaluation/lm_eval/tasks/paws-x/pawsx_template_yaml b/lm-evaluation/lm_eval/tasks/paws-x/pawsx_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47564738296fab4160241ea1a52522a40fbf6b2a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/paws-x/pawsx_template_yaml
@@ -0,0 +1,20 @@
+# This file will be included in the generated language-specific task configs.
+# It doesn't have a yaml file extension as it is not meant to be imported directly
+# by the harness.
+group: pawsx
+task: null
+dataset_path: paws-x
+dataset_name: null
+output_type: multiple_choice
+training_split: train
+validation_split: validation
+test_split: test
+doc_to_text: null
+doc_to_target: label
+doc_to_choice: null
+metric_list:
+ - metric: acc
+ aggregation: mean
+ higher_is_better: true
+metadata:
+ version: 0.0
diff --git a/lm-evaluation/lm_eval/tasks/toxigen/utils.py b/lm-evaluation/lm_eval/tasks/toxigen/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..77a4ad3217ec1648e66f8848acf601a58009b004
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/toxigen/utils.py
@@ -0,0 +1,7 @@
+import numpy as np
+
+
+def doc_to_target(doc):
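+    # Treat the example as toxic (label 1) when the summed human and AI toxicity scores exceed 5.5, else 0.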
+ return np.round(((doc["toxicity_ai"] + doc["toxicity_human"]) > 5.5), 0).astype(
+ np.int32
+ )