diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34cefc2543a16a02883ad493eb9d44634c186ea6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: abstract_narrative_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_abstract_narrative_understanding_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1e2903c3a0782f665f0010e84b2c073c8a5ce10
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_anachronisms_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6e20092e9dce7594545786eb54ed587813158ba4
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analogical_similarity_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_analogical_similarity_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9ecf8fb5f34c637fe6b8d3995ef51b1f5ca48cce
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_analytic_entailment_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9b19b92fde9418c5df171864249e414862e673d3
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_arithmetic_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..254f115b6517f22ead0c74870cb835299c3f6130
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ascii_word_recognition_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_ascii_word_recognition_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c4be304435bac358b7ddb732f60605ab029ed82
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bbq_lite_json_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_bbq_lite_json_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..73448ad929c76ab0e3b59cdd244f0cb429f9b92b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bridging_anaphora_resolution_barqa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_bridging_anaphora_resolution_barqa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1d09f2d463394f3300e533a3013f021153195a09
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: causal_judgment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_causal_judgment_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3bf9d9bf56702e0e52e53849cafc874dd6588778
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: contextual_parametric_knowledge_conflicts_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_contextual_parametric_knowledge_conflicts_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4aca69ad45a59b9773c040afb2e0c76327f19e87
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crash_blossom_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_crash_blossom_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0b8e694c07ef0ffe8d012605ba9fa00510a16422
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cs_algorithms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cs_algorithms_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2851f0bbbbd5265dc7b2cab72a8c8ffb8d85f22f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: date_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_date_understanding_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2827232a601ebcd2eb217df8c4ff2dde3542fc2d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_disambiguation_qa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff648d9c8f4aa1d4a16500ebf591e5b38abd0b6a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_emojis_emotion_prediction_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8adc12e96ee2726b36027b0ed604faf502c43f3c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_english_proverbs_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..24444e55d012584520eae448baef75245c1fe870
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_entailed_polarity_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c35581af46c580527af70b12aebe60aa808181d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: epistemic_reasoning_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_epistemic_reasoning_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b85acd95aedcfc2e197984c7bf9901b28c975f14
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_evaluating_information_essentiality_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4fbed8039d6a6a5442ea23afb20cec36754a88e9
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/fact_checker.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fact_checker_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_fact_checker_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..68a55e473930188eec62273d46823c23677d0b51
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/fantasy_reasoning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fantasy_reasoning_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_fantasy_reasoning_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ff37fd7b390f252adc11541a7c37e313d2a378b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: formal_fallacies_syllogisms_negation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_formal_fallacies_syllogisms_negation_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..321f7513755a6b9555e6ef712e723725df698b43
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindu_knowledge_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_hindu_knowledge_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..34b377101839b652ea0143e8c9ba318c8c9542fd
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_hyperbaton_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f716129d6d37cc4dac56b81dc083fec1c799f085
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_math_theorems_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_identify_math_theorems_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9bb0844203dd229e0f68eea232d2d2c14a1ae733
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicit_relations_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_implicit_relations_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..720ac92ae42d54aa469b8c999c518cfc068c2d78
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intent_recognition_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_intent_recognition_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a19ff99e55b6c61967b850dc0e356d0d474dc8fb
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irony_identification_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_irony_identification_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..75a673c896a6af18a7643f7c5c3c315faa03f970
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_key_value_maps_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..07e2711b457b0276f06b5489d406169bdf63149f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_games_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_language_games_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e269cd04e915e745b05714aa35b384d0a4305ab3
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistics_puzzles_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_linguistics_puzzles_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..da6a018fa8dd7d44e6b0a26bb481a65061bb8988
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logic_grid_puzzle.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logic_grid_puzzle_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_logic_grid_puzzle_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6661a54f7f74ac9cbf97d739ee8af018e8b97106
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/metaphor_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_metaphor_understanding_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f9c5db38f81186c27a588c70b8856c8881be6310
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/misconceptions_russian.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_misconceptions_russian_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..89b93d9d807f8fdc73a4447aaec6b37d0779e69a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_dialog_same_or_different_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_movie_dialog_same_or_different_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7055028ee950f7a56449e88854b7c2971b825d50
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_recommendation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_movie_recommendation_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..17b67bcc6d6950a8296d0dddb50ed3b9d383e231
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mult_data_wrangling.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mult_data_wrangling_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_mult_data_wrangling_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e69f27904bcf22b2d64c1b22040f860a9855fd06
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/navigate.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: navigate_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_navigate_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52d25bcacd61f9cf13f68f08c95bbaa6d5da7c21
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: nonsense_words_grammar_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_nonsense_words_grammar_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3fc74aa9ce7e6bddb32868680a723e76fa3575df
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: novel_concepts_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_novel_concepts_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aaa43e678ec29f4f54127263fc950cd459966528
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: odd_one_out_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_odd_one_out_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6fa92ed3a8ec59135f3ffa8c245efbef327bb1b1
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/persian_idioms.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: persian_idioms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_persian_idioms_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c797aec6e6d3f781b6a6882178f7ff34eb922a04
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: phrase_relatedness_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_phrase_relatedness_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bc06f79dff07e43a966c3c776b5d421b2f826f90
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physics.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_physics_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..44646f146a028329d1b59e30797c49a38df1e1e2
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_questions_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_physics_questions_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85aac7f4b66045c7e0d1ea68710c4bdb06c1a1fb
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/play_dialog_same_or_different.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: play_dialog_same_or_different_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_play_dialog_same_or_different_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..71a56aa805c70cfb32013c7895ffe5412475c446
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/presuppositions_as_nli.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: presuppositions_as_nli_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_presuppositions_as_nli_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..263d61ebe60d1f8ac22a119b6ead38df0d2dc03b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: qa_wikidata_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_qa_wikidata_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3ab6d5e06243e5701634cd0f23b3366349a7c9fd
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/reasoning_about_colored_objects.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: reasoning_about_colored_objects_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_reasoning_about_colored_objects_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..666aa49b060cd26a95d919cc4e920006e38ee6ca
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/repeat_copy_logic.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: repeat_copy_logic_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_repeat_copy_logic_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..49e3cb4b8dfc5eba4e84c94b4776c4e1f682ddd4
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/rephrase.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: rephrase_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_rephrase_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..32c38ba3786f863626e4ae23fe12c016ade5b8af
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/ruin_names.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ruin_names_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_ruin_names_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d930e7419a4175762e8cacf1f5297cc4424dd0d6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: salient_translation_error_detection_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_salient_translation_error_detection_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f23190e7acc67fd26699f5b80652e3ec674b49c8
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/scientific_press_release.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: scientific_press_release_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_scientific_press_release_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..82b86d1b47b857cb7a5b1d8b7789ecadddce8bed
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/similarities_abstraction.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: similarities_abstraction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_similarities_abstraction_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7b1849d5e56a1e6dfbfa0939e975af6c86708182
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simp_turing_concept.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simp_turing_concept_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simp_turing_concept_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cd1b61b9b0fe70c08c83ebf6c791c5d1437cdd08
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4e63fce945afc5a55f565e0f0e6c4cd5ce4012dc
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0983381ba2031eaa858d615cb13b1c3825b6d464
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_ethical_questions.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_ethical_questions_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_ethical_questions_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..13b67888cd767063dc1c0ceeceeb92c256b54a7d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_text_editing_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_text_editing_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a4da50c90c5ea470cd3bb7abcdd65aba2f20e66b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_iqa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_social_iqa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5a123fc9367fe62c43883eecc60bb477903bd26
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sports_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_sports_understanding_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f988071bad05cd514cd30791720933d77fb2f255
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strategyqa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_strategyqa_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f53d677caa8fd15389d25592db83dfafb56c768e
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/sufficient_information.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sufficient_information_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_sufficient_information_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ecf7465ff2e98da9fb98a5c883f09604ef4fc7d0
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: suicide_risk_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_suicide_risk_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2f31d3c033f5932d73ce61e7edf06ae19ab9b2f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swedish_to_german_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_swedish_to_german_proverbs_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..98e3d5b3694354c11446c8efc8583694c33d462d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/symbol_interpretation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: symbol_interpretation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_symbol_interpretation_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/tense.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/tense.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6a2676f087cfddcc47856d674df6d6dc673e6808
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/tense.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: tense_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_tense_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b9a03639a22c4a0e0ace177c4f433bc71b66284b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/topical_chat.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: topical_chat_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_topical_chat_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f7e1feb0525496ba8603edb58910f4522b06933c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: undo_permutation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_undo_permutation_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..21a67c437b76dd6275e1e0f8d40f77379e82648c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_conversion_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_unit_conversion_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..45943005c7420a81f6420bf7197259a0613ffedc
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unnatural_in_context_learning_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_unnatural_in_context_learning_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..84305bf33bbdcb441efd5efc3d8da8b1ac1bc167
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/vitaminc_fact_verification.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: vitaminc_fact_verification_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_vitaminc_fact_verification_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7879d1661eed9dd083ab3acabc737f756a29735e
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/what_is_the_tao.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: what_is_the_tao_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_what_is_the_tao_multiple_choice
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..71e79ae36353d0ca44548b60dfc0d623c4584edf
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_sorting_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_word_sorting_multiple_choice