diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ae5cfe90f02a8154c49c23ff2aad2cbb40cbbc1
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_analytic_entailment_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6ae791f5f3b7057f4d7927a986ec57bc27cb7cb
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_arithmetic_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..60eaa0be986950cc508431170accc8a9ae644c36
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ascii_word_recognition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_ascii_word_recognition_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d90a0e7cc31f1c7a04f7b509a26513d6bdb22c00
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_categorization_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_auto_categorization_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6812f69961b8a0a57d86d98e40c5316484fb5623
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bbq_lite_json_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_bbq_lite_json_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e1656800ad5d19d72508aaa35e68af0b55da624
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: causal_judgment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_causal_judgment_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c34bfdc26ecc1dc3f2f8e023e13eefc85d3fad71
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cause_and_effect_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cause_and_effect_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a3b08ca6c4db099c156f4cc2277e408c8cee6a4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cifar10_classification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cifar10_classification_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4bd83353a5fcebc5abcded346ab4d38f26bbd7ee
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: code_line_description_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_code_line_description_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/codenames.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/codenames.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e71510b4ba4215c91aca96d4a2c2d7fb676498e6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/codenames.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: codenames_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_codenames_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/color.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/color.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18793a9977a0d84bf32470e1f5ba0493549e31fd
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/color.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: color_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_color_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b36c1d5c2a2ac9a6d6a0b633c2777135122610b0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conceptual_combinations_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_conceptual_combinations_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e4da8946fd98ef021df67902ba5dc4857f34a227
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: contextual_parametric_knowledge_conflicts_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_contextual_parametric_knowledge_conflicts_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5fc59ee24bb455dff7cb77cfdb73ad11b7f1f572
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryobiology_spanish_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cryobiology_spanish_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..938fc4aff312eabeda39e95f46eaa787f9526ef2
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cs_algorithms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cs_algorithms_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0fdca6abd643776f45e4bd7163fd0fbe01f6087f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: date_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_date_understanding_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b671d715e1fe69c06c20385bc07b493ecc4d4d6f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_disambiguation_qa_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30182d9d1f884411dff255d208fd5c999209b003
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: discourse_marker_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_discourse_marker_prediction_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..814a95de6b16fb6ceb57cb9991bdec00bdffabb7
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dyck_languages_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dyck_languages_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fe807bc645a88d7f2e87da1d094a2ec1bb51805
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: elementary_math_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_elementary_math_qa_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af958389cb784df75e9a82573087903642cef6ab
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emoji_movie_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emoji_movie_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b26cbee762ba972b44d9404f421e975ee285487
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: empirical_judgments_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_empirical_judgments_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cdd014d9c64b37666cc54c9b7097941fcb2a54a2
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_english_proverbs_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e6da1e0ce03973656fdceb8854cf2b6adbeeedf
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_russian_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_english_russian_proverbs_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aba850d30fb5bc2e120aabd616663cbcd04f8488
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_hindi_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_entailed_polarity_hindi_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..718837f1c086b955d97d5ab0661dc350d482ae20
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: few_shot_nlg_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_few_shot_nlg_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gem.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gem.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f59f287869076ebf202cbf4f01d52b2935f87820
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gem_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gem_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..449b09c47ed4638e2773772b0ce27264cd694be0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gre_reading_comprehension_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gre_reading_comprehension_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5c437a4ad0322775013c80ff48cd1d875eb2cff
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hhh_alignment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hhh_alignment_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..463450b0cb275e2ea6391eb5bed44782ad3265da
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindi_question_answering_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hindi_question_answering_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7fef48a443c5256290c90650834832ebf2008000
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindu_knowledge_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hindu_knowledge_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2334fd6dc7d0a02751be1672d5f21eed837cb07b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: human_organs_senses_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_human_organs_senses_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4d0028e03dcb1af695e98fdb619c7d6d101e290c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_math_theorems_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_identify_math_theorems_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b4e1f9aa86cd9e29ad5109673b767dc33bde1e00
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_odd_metaphor_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_identify_odd_metaphor_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0583a17e4b456ca0d6334353fc16d8e89e95b962
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intent_recognition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intent_recognition_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1497c7802888d83da4c99cb1c0845e15da887584
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_nli_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_international_phonetic_alphabet_nli_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..71ad3b9d4a7f980529e64ce4ebba38a4db026f05
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_transliterate_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_international_phonetic_alphabet_transliterate_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..556c5a62a7e31b56732dd158efca9111fa2b8f60
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irony_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_irony_identification_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc351ce11290861bdf9d9ce71fb46ee832282265
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistic_mappings_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_linguistic_mappings_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/list_functions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/list_functions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..658630ac7a0ba0e0dfbc7c86e08a518866e6746c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/list_functions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: list_functions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_list_functions_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b55c057b1e67b4d5af232a9f9710dbbd56f10899
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_sequence_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_sequence_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..59e4fc3f2bb68f45cd4ff9c158b82b4cd0032241
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mathematical_induction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mathematical_induction_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1a162eae1b4226ba93f7dce1f0d8c46800512f9e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: matrixshapes_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_matrixshapes_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..029a4c0a073ccaefc8975ae37937319b27f1e7ee
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_metaphor_understanding_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..277bf69feff29559672655e47ce037df3c42c454
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: moral_permissibility_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_moral_permissibility_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..97c370ce883eaab2f9ea3abad34f08b2d1838b22
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_recommendation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_movie_recommendation_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b77c895577fa3894b4f6646702c7e237436864b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: natural_instructions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_natural_instructions_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/navigate.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/navigate.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..549ed37058fb3c2a9db7eb9d0d6e6ba4c2868983
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/navigate.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: navigate_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_navigate_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a58d7b5fb25068bb2149f4112355106f91fe263a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: odd_one_out_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_odd_one_out_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..358184e11ced80305697c7e5f18317af2161bab9
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_reading_comprehension_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_reading_comprehension_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7e3aa0f47f46229e09b8d9bee0805eb4bbf5b671
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: persian_idioms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_persian_idioms_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3fcfd47776ba5be480ed396fb98534e3cc7316aa
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_questions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physics_questions_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/play_dialog_same_or_different.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/play_dialog_same_or_different.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57b65cfd3b09ccbf473a6788f28777d05b71112b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/play_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: play_dialog_same_or_different_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_play_dialog_same_or_different_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23775493c1ffe4e82c833515eef998f767b401db
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: polish_sequence_labeling_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_polish_sequence_labeling_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/question_selection.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/question_selection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8e2321a8db770ea9e20761f5b7b117cbdeb7b583
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/question_selection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: question_selection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_question_selection_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/rephrase.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/rephrase.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16a337dbc2a8568cc36245f34b7eccaf28ed2548
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/rephrase.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: rephrase_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_rephrase_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/ruin_names.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/ruin_names.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e9ceddad3293c7c5fc315302962a63f61274b322
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/ruin_names.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ruin_names_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_ruin_names_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..122f66e7da0ec45e780fbb727809452c6ef64036
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: scientific_press_release_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_scientific_press_release_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/semantic_parsing_in_context_sparc.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/semantic_parsing_in_context_sparc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..276c997a1a6ea5d582cc89fe3ac858389aa287c3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/semantic_parsing_in_context_sparc.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: semantic_parsing_in_context_sparc_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_semantic_parsing_in_context_sparc_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39307d92fc3d5f78037102153cfd4e9cc0bb4b48
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: semantic_parsing_spider_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_semantic_parsing_spider_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..263b453fac68a15afa2b8d4ac14328fe6e096124
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentence_ambiguity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_sentence_ambiguity_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3ff5a1b1a8f51346978d03fd34cb6ad780f85f0b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_arithmetic_json_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..57052288e7fed1fabbe9a2c572b10c99f9a1fdcd
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_subtasks_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_arithmetic_json_subtasks_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..393ec8843a009267ea2515fe21105b50fed672e2
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_multiple_targets_json_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_arithmetic_multiple_targets_json_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..474c08aeb104a3ad171efe2975ab6a6d86c51e2a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sports_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_sports_understanding_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f5405d92e2eea8199985004288270fc1c50bce96
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strange_stories_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_strange_stories_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47c4b25c971fbbf78c5d62ee79de7c0699af2ba9
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strategyqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_strategyqa_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e276c4a051d1507991e00499f344c72fe42a4147
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: suicide_risk_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_suicide_risk_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/swedish_to_german_proverbs.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/swedish_to_german_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5a13d6f7fe014a2ab9a55fdb86cff68f8cb3401d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/swedish_to_german_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swedish_to_german_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_swedish_to_german_proverbs_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/tracking_shuffled_objects.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/tracking_shuffled_objects.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9c02866c8f07d5d8d9fdfd0459bbd01f327d19b3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/tracking_shuffled_objects.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: tracking_shuffled_objects_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_tracking_shuffled_objects_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/undo_permutation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/undo_permutation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3f0e914c87cb31eea9b9524c4552eca2234eadce
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/undo_permutation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: undo_permutation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_undo_permutation_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34c882dc1dde88d9b57144260b4f90390f548ce6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_interpretation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_unit_interpretation_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6f2ad8d3fd46a37ffc4fad10c1d927324054e043
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: vitaminc_fact_verification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_vitaminc_fact_verification_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc05c377785c652d603e275b6e9df7608eeef5fc
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: which_wiki_edit_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_which_wiki_edit_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_sorting.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_sorting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16be6060b7700a43fb4f1084fd753e72d370b20e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_sorting.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_sorting_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_word_sorting_generate_until