diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d6ae791f5f3b7057f4d7927a986ec57bc27cb7cb
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_arithmetic_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d90a0e7cc31f1c7a04f7b509a26513d6bdb22c00
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_categorization_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_auto_categorization_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b3dde85706c6b50ca3c597443efb6686037fe8b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chess_state_tracking_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_chess_state_tracking_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3eafb81943aec74feb620500ba8281f62249873b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emojis_emotion_prediction_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gem.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gem.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f59f287869076ebf202cbf4f01d52b2935f87820
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gem_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gem_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5c437a4ad0322775013c80ff48cd1d875eb2cff
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hhh_alignment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hhh_alignment_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1497c7802888d83da4c99cb1c0845e15da887584
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_nli_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_international_phonetic_alphabet_nli_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f9a8a5b86f69a9966116c203a114d2d0ca5428e7
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: kanji_ascii_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_kanji_ascii_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_games.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_games.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..560223007d7670499ec5064dddf200c0a252fc89
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_games.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_games_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_language_games_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/list_functions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/list_functions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..658630ac7a0ba0e0dfbc7c86e08a518866e6746c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/list_functions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: list_functions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_list_functions_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f3375eb60927e49931f96289b8ddb6b0f2a3d002
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a5e5e102ae68e5c472cfb368652064f4f67259fe
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_russian_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..277bf69feff29559672655e47ce037df3c42c454
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: moral_permissibility_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_moral_permissibility_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..622c7ab13312abd8aa3d1ad7d932ce06b13b4ba5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mult_data_wrangling_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mult_data_wrangling_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/multiemo.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/multiemo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..465ccd0ce4f15270edcc4a4e2585764ee59d4e71
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/multiemo.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: multiemo_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_multiemo_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..358184e11ced80305697c7e5f18317af2161bab9
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_reading_comprehension_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_reading_comprehension_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9fb5b23036510e8256774fb0d32964a590ff9dfe
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: qa_wikidata_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_qa_wikidata_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/rephrase.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/rephrase.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16a337dbc2a8568cc36245f34b7eccaf28ed2548
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/rephrase.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: rephrase_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_rephrase_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d1309732627fa2701012c7c53de12f42c0408cf
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/snarks.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/snarks.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d362537a181c1f6d3f72f139253f94d04b8154b6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/snarks.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: snarks_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_snarks_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/social_support.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/social_support.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dc00bb83755f75220a068b9c97047ec02e1eafed
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/social_support.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_support_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_support_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..474c08aeb104a3ad171efe2975ab6a6d86c51e2a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sports_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_sports_understanding_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..47c4b25c971fbbf78c5d62ee79de7c0699af2ba9
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strategyqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_strategyqa_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9972f4034148bd4f8f4b59b122a89a416f3d5c2f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: understanding_fables_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_understanding_fables_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34c882dc1dde88d9b57144260b4f90390f548ce6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_interpretation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_unit_interpretation_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6f2ad8d3fd46a37ffc4fad10c1d927324054e043
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: vitaminc_fact_verification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_vitaminc_fact_verification_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/winowhy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/winowhy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..99ff22d9c7f80dc3d05cfed74ec8749e7b8790d3
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/winowhy.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: winowhy_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_winowhy_generate_until
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16e62e69ba2f183473fea70c68513a87534797e6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_categorization_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_auto_categorization_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..72db1d8ee2a6cd04694e4bf6f48937e13bb7a692
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_auto_debugging_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c39ec2780916b80be6e6841c4d8fa8babf916d15
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cause_and_effect_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cause_and_effect_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0a9883d0eb304b2cf1425e3b04a657e4fb7b0903
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: checkmate_in_one_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_checkmate_in_one_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ea29979786c98ae463812876dfc5d7027d14e3dd
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chess_state_tracking_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_chess_state_tracking_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c24d5761fd9efcab1e0b4d69e9b9e1474c1f1aaa
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chinese_remainder_theorem_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_chinese_remainder_theorem_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f5918e604dd83fe4c748fe0e509374d32ed065ca
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cifar10_classification_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cifar10_classification_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5655ea1f5ac17b0c8772a5f4a0443e38e94f1bfa
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: codenames_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_codenames_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/color.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/color.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7350013f1bc15f7ea30fbfac6c2ea1e2bdc31beb
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/color.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: color_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_color_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf8f3aca16eacc0bb33cf2240576b9dfe6361bed
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: common_morpheme_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_common_morpheme_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3ee13b377bba666a1912137584d8b9fb572eb6b5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conceptual_combinations_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_conceptual_combinations_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5e0519f0fd1e7b0e7210f2b4fc84caaa45c2843
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryptonite_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cryptonite_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf8494cf9464d41e293f9a9d18a946a0f53e13ae
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disfl_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_disfl_qa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..64cb58ff24350264be6d1bca702983501dce144b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: elementary_math_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_elementary_math_qa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c848740b2c4ad2f73fd3c54ba1ec5a48ea0e1d72
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: empirical_judgments_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_empirical_judgments_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ed26147aec06003944012d1e8fb1f6d49363fd2d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_russian_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_english_russian_proverbs_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39fcd9cf49e2e42a54e9cb3894607c6f74f20482
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: few_shot_nlg_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_few_shot_nlg_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68a83956eb58a15aaf9ae8fa705f2883cbf3a9a6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: figure_of_speech_detection_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_figure_of_speech_detection_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gem.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gem.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bf81e88006f0ef68f26af3fdad9fc2aa48fd92c0
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gem.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gem_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_gem_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39eee21af5dfc4d90850e1bfd6e034c784b6cd3d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gender_inclusive_sentences_german_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_gender_inclusive_sentences_german_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8083b8698ece8e253404a17fed5c13b46aaad9b3
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: general_knowledge_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_general_knowledge_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..53523c33219911c0a2c82b4b1481fd1f0b4f1f53
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gre_reading_comprehension_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_gre_reading_comprehension_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c5e4f24aa7f2a3ac109468474b903f62f891b437
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hhh_alignment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_hhh_alignment_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a26fd55cef16b9367bf6ee836ab6432de42b776
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_implicatures_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6014a175f1c3278ece40a6d1f77d2a944a9f1601
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/intersect_geometry.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intersect_geometry_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_intersect_geometry_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c5f629386f4598bf3c8e67507f1a6adb077bd1f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/known_unknowns.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: known_unknowns_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_known_unknowns_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ea141fb04a87df23cd4c71723c277da11a3ef9f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_language_identification_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..50800d9deb31a5107c865318ba82e5bdb0fd21a8
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/linguistic_mappings.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistic_mappings_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_linguistic_mappings_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c6411afc8f5e50c6b7e88dfe55db027f1da0b49
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_fallacy_detection_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_logical_fallacy_detection_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9facf63967bd8a281e2053203d90a6622c7d82bb
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/matrixshapes.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: matrixshapes_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_matrixshapes_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7c476c4eb9b24b918a97c1e88943b862209db85d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_boolean_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_metaphor_boolean_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a1b091da92673584755f4d16d053043606af54a3
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mnist_ascii_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_mnist_ascii_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c8a2373588920c97d03c0dafd829a73174433161
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/modified_arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: modified_arithmetic_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_modified_arithmetic_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..10ff48ea585e99e2bc45b5632e117788b9ad5be5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/multiemo.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: multiemo_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_multiemo_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2cfc8283e831283b55d7bab4c3e801fae2232fc5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: paragraph_segmentation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_paragraph_segmentation_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7a9b61fb16af05b6dd34025fe6fbac184839cb61
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/parsinlu_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_parsinlu_qa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b7a644f9d7448f6ce49ed3836c3c1ed06e1f33a5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/periodic_elements.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: periodic_elements_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_periodic_elements_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8138791fffc5ff4f8a20d28113459c54d44b4385
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/real_or_fake_text.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: real_or_fake_text_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_real_or_fake_text_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a988e54c51380f004e22cc303812e192c8291328
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: semantic_parsing_spider_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_semantic_parsing_spider_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e4a18f1ada69ac34ab9ccf76ed1b98fc1b9aae7
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentence_ambiguity_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_sentence_ambiguity_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8688512bda759ae89b34230ffefa5ff477e69eb7
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_subtasks_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_json_subtasks_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..685ec17c1ad672bc07df05bb140c1400043bc2d6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_multiple_targets_json_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_multiple_targets_json_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..30877750e5ba4c9a0de36019ec03effdb2ac1791
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/strange_stories.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strange_stories_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_strange_stories_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..40103274e9781f1d2736bf1f4aabdaa08dc9aa21
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/swahili_english_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swahili_english_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_swahili_english_proverbs_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..abd8834b0f30b30768ab9aec4524c3c4142d0530
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: temporal_sequences_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_temporal_sequences_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..350d4e786c1a46ddc057edeffde8695772b5f24a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/timedial.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: timedial_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_timedial_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f9aa366b7abcc27a0efcbc825068c0fdfdd4c929
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: tracking_shuffled_objects_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_tracking_shuffled_objects_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bbfeb14458a96cbf4c9ff9273efefa92f10128d0
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_unscrambling_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_word_unscrambling_multiple_choice