diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..831361984ab186fb29835595db2853469ee0f7e6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_anachronisms_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3d7510dfc80d4e52db0cc020f5f2abcdf9952795
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: authorship_verification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_authorship_verification_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d8802c1c85d3dd4ae02f04a86982b08be6e214e3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_auto_debugging_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ec9cccc8c72e887e047a5871c496d68498f7f576
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conlang_translation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_conlang_translation_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3393c36805d6b29cd3d59481b11c8b8dd45e2910
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryptonite_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cryptonite_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f13ec2a4a0fc2dd244aefb53cb7e409fdb2bdad1
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dark_humor_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dark_humor_detection_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3eafb81943aec74feb620500ba8281f62249873b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emojis_emotion_prediction_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b640b9430ad8a11758152c63ad0c77497fd16d50
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_evaluating_information_essentiality_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..62dd5197439239a86c7d044d28fd936226481a02
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fact_checker_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fact_checker_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b94f4c05b924d9ca001addc50ba76a03fc3a32f7
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fantasy_reasoning_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fantasy_reasoning_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d3afc0edf2efd7056f8d46ad0d85ae55c7073be8
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: formal_fallacies_syllogisms_negation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_formal_fallacies_syllogisms_negation_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12dd01b8b299a1fd703c8853653eea979543b0a5
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gender_inclusive_sentences_german_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gender_inclusive_sentences_german_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..22748246128e774650563a8652a94d57b0e5a338
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: goal_step_wikihow_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_goal_step_wikihow_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1e428c2a5304d43efc1b00ff53e8d3de493c115b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hyperbaton_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f2868a4a7c7345f4fe40047e1ecb4e06a53e3ee
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intersect_geometry_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intersect_geometry_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3ea697d1f7664866050ecbd0615ea3e957a13602
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_key_value_maps_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_identification.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_identification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cb7b27408b9a82c308ebac33b89e799df0763a0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_identification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_language_identification_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa8f2c2fefbed31d42e61db5261810b49e7ff35e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logic_grid_puzzle_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logic_grid_puzzle_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a5e5e102ae68e5c472cfb368652064f4f67259fe
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_russian_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..edbb2b34b8cceb119a191942fb617cf99367cd40
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: modified_arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_modified_arithmetic_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ed30902f6ec63439564b5e021807eb4ae672967
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: nonsense_words_grammar_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_nonsense_words_grammar_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12f388f8ef8164c30c0843d0a0cda59bc108d66d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: novel_concepts_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_novel_concepts_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/operators.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/operators.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6aaa8b61799f665645249c19d833593576709c6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/operators.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: operators_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_operators_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..552f8c6068fde183ab744a1e322c41c8744070e0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_qa_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5c96cec606f6ba3e749c970b20f71d9ed200799
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: periodic_elements_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_periodic_elements_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..037da053e4e5ed3869f75976ebca9ae81d394314
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: phrase_relatedness_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_phrase_relatedness_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39bc786bae05862d66b4f358313feee70ee8d14a
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physics_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/presuppositions_as_nli.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/presuppositions_as_nli.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..70da2d747022062c552856c3594c5033b1401562
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/presuppositions_as_nli.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: presuppositions_as_nli_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_presuppositions_as_nli_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/reasoning_about_colored_objects.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/reasoning_about_colored_objects.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0b371d6e37baabaadb7a7e7424a12cd9dd7b81b9
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/reasoning_about_colored_objects.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: reasoning_about_colored_objects_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_reasoning_about_colored_objects_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bd8cd4d8563d4be2b92e18fcd48adc13d6c06f9e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: repeat_copy_logic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_repeat_copy_logic_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ba7721de1664e92a1f2de1359c44a5a1bf2e23c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_iqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_iqa_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_support.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_support.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dc00bb83755f75220a068b9c97047ec02e1eafed
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_support.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_support_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_support_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..414dc51b137fb55037b5b9bc109bba116ee72d34
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: temporal_sequences_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_temporal_sequences_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/what_is_the_tao.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/what_is_the_tao.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3a1487ab41c445cda992e30235947c6e8e9f01db
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/what_is_the_tao.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: what_is_the_tao_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_what_is_the_tao_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5632a79c639f23b9635a810176a5ea10343c506f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_unscrambling_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_word_unscrambling_generate_until
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1e2903c3a0782f665f0010e84b2c073c8a5ce10
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_anachronisms_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ecf8fb5f34c637fe6b8d3995ef51b1f5ca48cce
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_analytic_entailment_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b19b92fde9418c5df171864249e414862e673d3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_arithmetic_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72db1d8ee2a6cd04694e4bf6f48937e13bb7a692
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_auto_debugging_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c187505d302f723db6d4c7be0d6c464cce79047c
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryobiology_spanish_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cryobiology_spanish_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0b8e694c07ef0ffe8d012605ba9fa00510a16422
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cs_algorithms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cs_algorithms_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2851f0bbbbd5265dc7b2cab72a8c8ffb8d85f22f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: date_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_date_understanding_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2827232a601ebcd2eb217df8c4ff2dde3542fc2d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_disambiguation_qa_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5a18733fb7b5698c2649a57ad883cd3e1436130d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: discourse_marker_prediction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_discourse_marker_prediction_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..48d6f32e4504687fd22d6715d935eb404d279a4d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dyck_languages_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_dyck_languages_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b85acd95aedcfc2e197984c7bf9901b28c975f14
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_evaluating_information_essentiality_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68a83956eb58a15aaf9ae8fa705f2883cbf3a9a6
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: figure_of_speech_detection_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_figure_of_speech_detection_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2fef6d9301484a42a5a4cd26f2df0dd241b0d104
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: human_organs_senses_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_human_organs_senses_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34b377101839b652ea0143e8c9ba318c8c9542fd
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_hyperbaton_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a26fd55cef16b9367bf6ee836ab6432de42b776
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_implicatures_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..75a673c896a6af18a7643f7c5c3c315faa03f970
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_key_value_maps_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..84f55f644909026aec8cfd9e8cf8321e45bd6255
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_args_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_logical_args_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9facf63967bd8a281e2053203d90a6622c7d82bb
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: matrixshapes_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_matrixshapes_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6661a54f7f74ac9cbf97d739ee8af018e8b97106
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_metaphor_understanding_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..67109c8cbb941013dd106d486f57c7caa0a2cff4
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: minute_mysteries_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_minute_mysteries_qa_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..63d0fcda69e3109695b0a250b00b214f822e1568
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_misconceptions_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a1b091da92673584755f4d16d053043606af54a3
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mnist_ascii_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_mnist_ascii_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3829555221c2cd7ab0359d4f4074c4c00da57adc
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: moral_permissibility_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_moral_permissibility_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..10ff48ea585e99e2bc45b5632e117788b9ad5be5
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: multiemo_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_multiemo_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..089376dd8ee05da574c844f37ad9c1c7a23cd162
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physical_intuition_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_physical_intuition_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44646f146a028329d1b59e30797c49a38df1e1e2
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_questions_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_physics_questions_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85aac7f4b66045c7e0d1ea68710c4bdb06c1a1fb
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: play_dialog_same_or_different_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_play_dialog_same_or_different_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8138791fffc5ff4f8a20d28113459c54d44b4385
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: real_or_fake_text_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_real_or_fake_text_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..666aa49b060cd26a95d919cc4e920006e38ee6ca
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: repeat_copy_logic_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_repeat_copy_logic_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..32c38ba3786f863626e4ae23fe12c016ade5b8af
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ruin_names_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_ruin_names_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a988e54c51380f004e22cc303812e192c8291328
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: semantic_parsing_spider_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_semantic_parsing_spider_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e4a18f1ada69ac34ab9ccf76ed1b98fc1b9aae7
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentence_ambiguity_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_sentence_ambiguity_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ecf7465ff2e98da9fb98a5c883f09604ef4fc7d0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: suicide_risk_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_suicide_risk_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2f31d3c033f5932d73ce61e7edf06ae19ab9b2f
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swedish_to_german_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_swedish_to_german_proverbs_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..98e3d5b3694354c11446c8efc8583694c33d462d
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: symbol_interpretation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_symbol_interpretation_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45943005c7420a81f6420bf7197259a0613ffedc
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unnatural_in_context_learning_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_unnatural_in_context_learning_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7879d1661eed9dd083ab3acabc737f756a29735e
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: what_is_the_tao_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_what_is_the_tao_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..98bc6e4b23a75abd1a4a560260b88a95034e1f0b
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: winowhy_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_winowhy_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..71e79ae36353d0ca44548b60dfc0d623c4584edf
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_sorting_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_word_sorting_multiple_choice
diff --git a/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bbfeb14458a96cbf4c9ff9273efefa92f10128d0
--- /dev/null
+++ b/lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_unscrambling_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_word_unscrambling_multiple_choice
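
Every config in this patch shares the same four-line template, and each file's header comment says it was emitted by utils.py. That script is not part of this diff, so the following is only a minimal sketch of how such a generator could work; the `SUBTASKS` list, the `write_configs` name, and the hard-coded paths are illustrative assumptions, not the actual utils.py.

```python
# Hypothetical sketch of a config generator in the spirit of utils.py
# (assumed names and paths; not the actual lm-evaluation-harness script).
from pathlib import Path

# Illustrative subset of subtask names; the real generator would cover
# every BIG-bench subtask that appears in the diff above.
SUBTASKS = ["anachronisms", "hyperbaton", "word_unscrambling"]

def write_configs(base_dir: str, style: str) -> None:
    """Write <base_dir>/<style>/<subtask>.yaml for each subtask.

    `style` is "generate_until" or "multiple_choice", matching the two
    shared template files that the generated configs include.
    """
    out_dir = Path(base_dir) / style
    out_dir.mkdir(parents=True, exist_ok=True)
    for name in SUBTASKS:
        yaml_text = (
            "# Generated by utils.py\n"
            f"dataset_name: {name}_zero_shot\n"
            f"include: ../{style}_template_yaml\n"
            f"task: bigbench_{name}_{style}\n"
        )
        (out_dir / f"{name}.yaml").write_text(yaml_text)

if __name__ == "__main__":
    for style in ("generate_until", "multiple_choice"):
        write_configs("lm-evaluation/lm_eval/tasks/bigbench", style)
```

Keeping each per-task YAML down to a dataset name, a shared template include, and a task id means prompt and metric changes live in the two template files, so a single template edit propagates to every generated subtask config.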