Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicatures.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/language_identification.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/rephrase.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/social_support.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml +4 -0
- lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml +4 -0
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: abstract_narrative_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_abstract_narrative_understanding_generate_until
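
Every added file follows this same four-line pattern, differing only in the subtask name, and the `# Generated by utils.py` header indicates they were emitted by a script. Below is a minimal sketch of such a generator; the `TASKS` list, output directory, and function names are illustrative assumptions, not the actual contents of `utils.py`.

```python
# Minimal sketch of a utils.py-style generator for these YAML configs.
# TASKS and OUT_DIR are illustrative assumptions, not the real utils.py.
from pathlib import Path

TASKS = [
    "abstract_narrative_understanding",
    "anachronisms",
    "arithmetic",
    # ... one entry per BIG-bench subtask in the diff above
]

OUT_DIR = Path("lm_eval/tasks/bigbench/generate_until")

TEMPLATE = """\
# Generated by utils.py
dataset_name: {name}_zero_shot
include: ../generate_until_template_yaml
task: bigbench_{name}_generate_until
"""

def main() -> None:
    OUT_DIR.mkdir(parents=True, exist_ok=True)
    for name in TASKS:
        # One four-line YAML per subtask, matching the hunks in this diff.
        (OUT_DIR / f"{name}.yaml").write_text(TEMPLATE.format(name=name))

if __name__ == "__main__":
    main()
```

Each generated file only overrides `dataset_name` and `task`; everything else (dataset path, output type, metrics) is inherited from the shared `../generate_until_template_yaml` via the harness's `include` mechanism.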
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_anachronisms_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_arithmetic_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: authorship_verification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_authorship_verification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bridging_anaphora_resolution_barqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_bridging_anaphora_resolution_barqa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: causal_judgment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_causal_judgment_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: code_line_description_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_code_line_description_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryobiology_spanish_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cryobiology_spanish_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dark_humor_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dark_humor_detection_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_disambiguation_qa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dyck_languages_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dyck_languages_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emojis_emotion_prediction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_evaluating_information_essentiality_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: geometric_shapes_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_geometric_shapes_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: goal_step_wikihow_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_goal_step_wikihow_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindi_question_answering_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hindi_question_answering_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hyperbaton_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_odd_metaphor_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_identify_odd_metaphor_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicatures.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_implicatures_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicit_relations_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_implicit_relations_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_transliterate_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_international_phonetic_alphabet_transliterate_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irony_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_irony_identification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: known_unknowns_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_known_unknowns_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/language_identification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_language_identification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logic_grid_puzzle_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logic_grid_puzzle_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_boolean_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_metaphor_boolean_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_metaphor_understanding_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_russian_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: moral_permissibility_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_moral_permissibility_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_dialog_same_or_different_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_movie_dialog_same_or_different_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: paragraph_segmentation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_paragraph_segmentation_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_qa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_reading_comprehension_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_reading_comprehension_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: penguins_in_a_table_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_penguins_in_a_table_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: periodic_elements_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_periodic_elements_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physical_intuition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physical_intuition_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/physics_questions.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_questions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physics_questions_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/polish_sequence_labeling.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: polish_sequence_labeling_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_polish_sequence_labeling_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/qa_wikidata.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: qa_wikidata_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_qa_wikidata_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/real_or_fake_text.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: real_or_fake_text_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_real_or_fake_text_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/rephrase.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: rephrase_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_rephrase_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: riddle_sense_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_riddle_sense_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: salient_translation_error_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_salient_translation_error_detection_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentence_ambiguity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_sentence_ambiguity_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/similarities_abstraction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: similarities_abstraction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_similarities_abstraction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_subtasks.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_subtasks_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_arithmetic_json_subtasks_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_text_editing_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_text_editing_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/social_support.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_support_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_support_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/strategyqa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strategyqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_strategyqa_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/suicide_risk.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: suicide_risk_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_suicide_risk_generate_until
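
Once these configs are in place, each task should be loadable by its `task` name through lm-evaluation-harness. A hedged usage sketch via the harness's Python API is below; the model choice is illustrative, and exact keyword arguments may differ across harness versions.

```python
# Hedged sketch: running one of the added tasks through lm-evaluation-harness.
# Assumes lm_eval and a Hugging Face model are installed; argument names
# may vary by harness version, and gpt2 is only an illustrative model.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["bigbench_strategyqa_generate_until"],
)
print(results["results"])
```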