applied-ai-018 committed
Commit 6a74bee · verified · Parent: 6db8ee3

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full set.
Files changed (50):
  1. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml +4 -0
  2. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml +4 -0
  3. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml +4 -0
  4. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml +4 -0
  5. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml +4 -0
  6. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml +4 -0
  7. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml +4 -0
  8. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml +4 -0
  9. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml +4 -0
  10. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml +4 -0
  11. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml +4 -0
  12. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml +4 -0
  13. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml +4 -0
  14. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml +4 -0
  17. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml +4 -0
  18. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml +4 -0
  19. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicatures.yaml +4 -0
  20. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml +4 -0
  21. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml +4 -0
  22. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml +4 -0
  23. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml +4 -0
  24. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/language_identification.yaml +4 -0
  25. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml +4 -0
  26. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml +4 -0
  27. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml +4 -0
  28. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml +4 -0
  29. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml +4 -0
  30. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml +4 -0
  31. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml +4 -0
  32. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml +4 -0
  33. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml +4 -0
  34. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml +4 -0
  35. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml +4 -0
  36. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml +4 -0
  37. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml +4 -0
  38. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml +4 -0
  39. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml +4 -0
  40. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml +4 -0
  41. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/rephrase.yaml +4 -0
  42. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml +4 -0
  43. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml +4 -0
  44. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml +4 -0
  45. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml +4 -0
  46. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml +4 -0
  47. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml +4 -0
  48. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/social_support.yaml +4 -0
  49. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml +4 -0
  50. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml +4 -0
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: abstract_narrative_understanding_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_abstract_narrative_understanding_generate_until
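
Every file in this commit follows the same four-line pattern shown above, varying only dataset_name and task; the header comment credits a generator script, utils.py, which is not itself part of this commit. As a minimal sketch (the task list, output directory, and script structure are all assumptions, not the real utils.py), such a generator could look like:

    # Hypothetical sketch of the generator implied by the "# Generated by
    # utils.py" header; the actual utils.py is not included in this commit.
    from pathlib import Path

    # Assumption: the full list mirrors the "Files changed" listing above;
    # only a few entries are shown here.
    TASKS = [
        "abstract_narrative_understanding",
        "anachronisms",
        "arithmetic",
    ]

    OUT_DIR = Path("lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until")

    for name in TASKS:
        # Each emitted file carries only the per-task fields; everything
        # else comes from the shared template via the include directive.
        yaml_text = (
            "# Generated by utils.py\n"
            f"dataset_name: {name}_zero_shot\n"
            "include: ../generate_until_template_yaml\n"
            f"task: bigbench_{name}_generate_until\n"
        )
        (OUT_DIR / f"{name}.yaml").write_text(yaml_text)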
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: anachronisms_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_anachronisms_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: arithmetic_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_arithmetic_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: authorship_verification_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_authorship_verification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: bridging_anaphora_resolution_barqa_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_bridging_anaphora_resolution_barqa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: causal_judgment_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_causal_judgment_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: code_line_description_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_code_line_description_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: cryobiology_spanish_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_cryobiology_spanish_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: dark_humor_detection_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_dark_humor_detection_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: disambiguation_qa_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_disambiguation_qa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: dyck_languages_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_dyck_languages_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: emojis_emotion_prediction_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_emojis_emotion_prediction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: evaluating_information_essentiality_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_evaluating_information_essentiality_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: geometric_shapes_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_geometric_shapes_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: goal_step_wikihow_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_goal_step_wikihow_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: hindi_question_answering_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_hindi_question_answering_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: hyperbaton_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_hyperbaton_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: identify_odd_metaphor_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_identify_odd_metaphor_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicatures.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: implicatures_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_implicatures_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: implicit_relations_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_implicit_relations_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: international_phonetic_alphabet_transliterate_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_international_phonetic_alphabet_transliterate_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irony_identification_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_irony_identification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: known_unknowns_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_known_unknowns_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/language_identification.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: language_identification_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_language_identification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: logic_grid_puzzle_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_logic_grid_puzzle_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: metaphor_boolean_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_metaphor_boolean_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: metaphor_understanding_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_metaphor_understanding_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: misconceptions_russian_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_misconceptions_russian_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: moral_permissibility_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_moral_permissibility_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: movie_dialog_same_or_different_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_movie_dialog_same_or_different_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: paragraph_segmentation_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_paragraph_segmentation_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: parsinlu_qa_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_parsinlu_qa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: parsinlu_reading_comprehension_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_parsinlu_reading_comprehension_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: penguins_in_a_table_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_penguins_in_a_table_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: periodic_elements_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_periodic_elements_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: physical_intuition_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_physical_intuition_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: physics_questions_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_physics_questions_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: polish_sequence_labeling_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_polish_sequence_labeling_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: qa_wikidata_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_qa_wikidata_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: real_or_fake_text_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_real_or_fake_text_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/rephrase.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: rephrase_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_rephrase_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: riddle_sense_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_riddle_sense_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: salient_translation_error_detection_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_salient_translation_error_detection_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: sentence_ambiguity_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_sentence_ambiguity_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: similarities_abstraction_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_similarities_abstraction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: simple_arithmetic_json_subtasks_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_simple_arithmetic_json_subtasks_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: simple_text_editing_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_simple_text_editing_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/social_support.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: social_support_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_social_support_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: strategyqa_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_strategyqa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: suicide_risk_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_suicide_risk_generate_until
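
Once these files are on lm-evaluation-harness's task path, each task can be run by the name declared in its task field. A minimal smoke-test sketch via the harness's Python entry point (the checkpoint and example limit are placeholders, not values from this commit):

    # Sketch: evaluating one of the newly added tasks with
    # lm-evaluation-harness. Model choice and limit are placeholders.
    import lm_eval

    results = lm_eval.simple_evaluate(
        model="hf",  # Hugging Face transformers backend
        model_args="pretrained=gpt2",  # placeholder checkpoint
        tasks=["bigbench_anachronisms_generate_until"],
        limit=8,  # run only a few examples as a quick sanity check
    )
    print(results["results"])  # per-task metrics dict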