applied-ai-018 committed on
Commit
b613cde
·
verified ·
1 Parent(s): fa347e8

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml +4 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml +4 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml +4 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml +4 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gem.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml +4 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml +4 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml +4 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_games.yaml +4 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/list_functions.yaml +4 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml +4 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml +4 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml +4 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml +4 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/multiemo.yaml +4 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml +4 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml +4 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/rephrase.yaml +4 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml +4 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/snarks.yaml +4 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/social_support.yaml +4 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml +4 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml +4 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml +4 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml +4 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml +4 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/winowhy.yaml +4 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml +4 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml +4 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml +4 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml +4 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml +4 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml +4 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml +4 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml +4 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/color.yaml +4 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml +4 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml +4 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml +4 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml +4 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml +4 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml +4 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml +4 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml +4 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml +4 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gem.yaml +4 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml +4 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: arithmetic_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_arithmetic_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: auto_categorization_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_auto_categorization_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chess_state_tracking.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: chess_state_tracking_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_chess_state_tracking_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: emojis_emotion_prediction_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_emojis_emotion_prediction_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gem.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: gem_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_gem_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: hhh_alignment_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_hhh_alignment_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: international_phonetic_alphabet_nli_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_international_phonetic_alphabet_nli_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kanji_ascii.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: kanji_ascii_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_kanji_ascii_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_games.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: language_games_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_language_games_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/list_functions.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: list_functions_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_list_functions_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: misconceptions_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_misconceptions_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: misconceptions_russian_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_misconceptions_russian_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: moral_permissibility_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_moral_permissibility_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mult_data_wrangling.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: mult_data_wrangling_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_mult_data_wrangling_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/multiemo.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: multiemo_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_multiemo_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: parsinlu_reading_comprehension_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_parsinlu_reading_comprehension_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: qa_wikidata_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_qa_wikidata_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/rephrase.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: rephrase_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_rephrase_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_simple_arithmetic_json_multiple_choice_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/snarks.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: snarks_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_snarks_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/social_support.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: social_support_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_social_support_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sports_understanding.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: sports_understanding_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_sports_understanding_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: strategyqa_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_strategyqa_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/understanding_fables.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: understanding_fables_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_understanding_fables_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: unit_interpretation_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_unit_interpretation_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/vitaminc_fact_verification.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: vitaminc_fact_verification_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_vitaminc_fact_verification_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/winowhy.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: winowhy_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_winowhy_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_categorization.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: auto_categorization_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_auto_categorization_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: auto_debugging_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_auto_debugging_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: cause_and_effect_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_cause_and_effect_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/checkmate_in_one.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: checkmate_in_one_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_checkmate_in_one_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: chess_state_tracking_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_chess_state_tracking_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/chinese_remainder_theorem.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: chinese_remainder_theorem_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_chinese_remainder_theorem_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cifar10_classification.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: cifar10_classification_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_cifar10_classification_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: codenames_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_codenames_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/color.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: color_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_color_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: common_morpheme_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_common_morpheme_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: conceptual_combinations_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_conceptual_combinations_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryptonite.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: cryptonite_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_cryptonite_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: disfl_qa_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_disfl_qa_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: elementary_math_qa_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_elementary_math_qa_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/empirical_judgments.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: empirical_judgments_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_empirical_judgments_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/english_russian_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: english_russian_proverbs_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_english_russian_proverbs_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/few_shot_nlg.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: few_shot_nlg_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_few_shot_nlg_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: figure_of_speech_detection_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_figure_of_speech_detection_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gem.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: gem_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_gem_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gender_inclusive_sentences_german.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: gender_inclusive_sentences_german_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_gender_inclusive_sentences_german_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/general_knowledge.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: general_knowledge_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_general_knowledge_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/gre_reading_comprehension.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: gre_reading_comprehension_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_gre_reading_comprehension_multiple_choice
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hhh_alignment.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: hhh_alignment_zero_shot
3
+ include: ../multiple_choice_template_yaml
4
+ task: bigbench_hhh_alignment_multiple_choice