applied-ai-018 committed on
Commit
2386cd6
·
verified ·
1 Parent(s): bf00682

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml +4 -0
  2. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml +4 -0
  3. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml +4 -0
  4. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml +4 -0
  5. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/gem.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/operators.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml +4 -0
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/abstract_narrative_understanding.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: abstract_narrative_understanding_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_abstract_narrative_understanding_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: analogical_similarity_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_analogical_similarity_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: auto_categorization_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_auto_categorization_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: bbq_lite_json_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_bbq_lite_json_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/bridging_anaphora_resolution_barqa.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: bridging_anaphora_resolution_barqa_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_bridging_anaphora_resolution_barqa_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/causal_judgment.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: causal_judgment_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_causal_judgment_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: cause_and_effect_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_cause_and_effect_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: checkmate_in_one_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_checkmate_in_one_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: chess_state_tracking_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_chess_state_tracking_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: cifar10_classification_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_cifar10_classification_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: code_line_description_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_code_line_description_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: codenames_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_codenames_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: common_morpheme_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_common_morpheme_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: crash_blossom_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_crash_blossom_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: crass_ai_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_crass_ai_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: cryptonite_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_cryptonite_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: dark_humor_detection_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_dark_humor_detection_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: emoji_movie_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_emoji_movie_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: emojis_emotion_prediction_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_emojis_emotion_prediction_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: empirical_judgments_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_empirical_judgments_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: english_proverbs_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_english_proverbs_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: english_russian_proverbs_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_english_russian_proverbs_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: epistemic_reasoning_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_epistemic_reasoning_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/formal_fallacies_syllogisms_negation.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: formal_fallacies_syllogisms_negation_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_formal_fallacies_syllogisms_negation_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/gem.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: gem_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_gem_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: general_knowledge_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_general_knowledge_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: hhh_alignment_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_hhh_alignment_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hindu_knowledge.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: hindu_knowledge_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_hindu_knowledge_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: hinglish_toxicity_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_hinglish_toxicity_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: identify_math_theorems_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_identify_math_theorems_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: identify_odd_metaphor_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_identify_odd_metaphor_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: implicit_relations_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_implicit_relations_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: intent_recognition_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_intent_recognition_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: international_phonetic_alphabet_transliterate_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_international_phonetic_alphabet_transliterate_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: irony_identification_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_irony_identification_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: kanji_ascii_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_kanji_ascii_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: kannada_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_kannada_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/language_games.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: language_games_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_language_games_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/linguistics_puzzles.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: linguistics_puzzles_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_linguistics_puzzles_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: logical_deduction_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_logical_deduction_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_fallacy_detection.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: logical_fallacy_detection_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_logical_fallacy_detection_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: logical_sequence_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_logical_sequence_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/movie_dialog_same_or_different.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: movie_dialog_same_or_different_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_movie_dialog_same_or_different_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: movie_recommendation_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_movie_recommendation_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: natural_instructions_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_natural_instructions_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/nonsense_words_grammar.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: nonsense_words_grammar_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_nonsense_words_grammar_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/novel_concepts.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: novel_concepts_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_novel_concepts_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/operators.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: operators_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_operators_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/paragraph_segmentation.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: paragraph_segmentation_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_paragraph_segmentation_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: penguins_in_a_table_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_penguins_in_a_table_multiple_choice