Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicatures.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kannada.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_identification.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/navigate.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/object_counting.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/operators.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/question_selection.yaml +4 -0
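Every file in this listing registers one zero-shot BIG-bench subtask under the `generate_until` output type; the `task:` names they define can then be passed to lm-evaluation-harness. The snippet below is only a usage sketch, not part of this commit: it assumes the `lm_eval.simple_evaluate` Python entry point and illustrative model arguments (`hf` backend, `pretrained=gpt2`), and the exact argument names may differ between harness versions.

```python
# Usage sketch (assumption, not from this commit): evaluate one of the
# generated BIG-bench tasks with lm-evaluation-harness. Assumes the
# v0.4-style lm_eval.simple_evaluate API; argument names may vary.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                        # Hugging Face backend
    model_args="pretrained=gpt2",      # any HF causal LM id, for illustration
    tasks=["bigbench_anachronisms_generate_until"],  # task name from the YAML below
    limit=10,                          # only a handful of examples, for a quick check
)

print(results["results"])
```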
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_anachronisms_generate_until
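Each of the 50 files differs only in the subtask name substituted into `dataset_name` and `task`. Purely for illustration of the `# Generated by utils.py` header, a generator of this pattern could look like the hypothetical sketch below; the real utils.py is not included in this diff, and the subtask list is truncated.

```python
# Hypothetical sketch of a generator in the spirit of the "Generated by
# utils.py" header above. NOT the actual script in the harness; it only
# shows how each four-line YAML could be emitted from a list of subtasks.
from pathlib import Path

SUBTASKS = ["anachronisms", "analogical_similarity", "auto_debugging"]  # truncated

TEMPLATE = """\
# Generated by utils.py
dataset_name: {name}_zero_shot
include: ../generate_until_template_yaml
task: bigbench_{name}_generate_until
"""

out_dir = Path("generate_until")
out_dir.mkdir(exist_ok=True)
for name in SUBTASKS:
    (out_dir / f"{name}.yaml").write_text(TEMPLATE.format(name=name))
```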
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analogical_similarity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_analogical_similarity_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_auto_debugging_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chinese_remainder_theorem_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_chinese_remainder_theorem_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: common_morpheme_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_common_morpheme_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conceptual_combinations_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_conceptual_combinations_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conlang_translation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_conlang_translation_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crass_ai_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_crass_ai_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryobiology_spanish_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cryobiology_spanish_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryptonite_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cryptonite_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dark_humor_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dark_humor_detection_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: date_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_date_understanding_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: discourse_marker_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_discourse_marker_prediction_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emoji_movie_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emoji_movie_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: empirical_judgments_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_empirical_judgments_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_english_proverbs_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: epistemic_reasoning_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_epistemic_reasoning_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_evaluating_information_essentiality_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fact_checker_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fact_checker_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fantasy_reasoning_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fantasy_reasoning_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: geometric_shapes_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_geometric_shapes_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: goal_step_wikihow_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_goal_step_wikihow_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindi_question_answering_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hindi_question_answering_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hinglish_toxicity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hinglish_toxicity_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicatures.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_implicatures_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicit_relations_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_implicit_relations_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intent_recognition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intent_recognition_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: international_phonetic_alphabet_transliterate_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_international_phonetic_alphabet_transliterate_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: irony_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_irony_identification_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kannada.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: kannada_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_kannada_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_key_value_maps_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: known_unknowns_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_known_unknowns_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_identification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_language_identification_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistics_puzzles_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_linguistics_puzzles_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: matrixshapes_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_matrixshapes_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_boolean_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_metaphor_boolean_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: metaphor_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_metaphor_understanding_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: minute_mysteries_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_minute_mysteries_qa_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: modified_arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_modified_arithmetic_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_dialog_same_or_different_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_movie_dialog_same_or_different_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: natural_instructions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_natural_instructions_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/navigate.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: navigate_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_navigate_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/object_counting.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: object_counting_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_object_counting_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/operators.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: operators_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_operators_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_qa_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: periodic_elements_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_periodic_elements_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: persian_idioms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_persian_idioms_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physical_intuition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physical_intuition_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physics.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physics_generate_until
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/question_selection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: question_selection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_question_selection_generate_until