Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/codenames.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/color.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crash_blossom.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/disfl_qa.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/entailed_polarity.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/figure_of_speech_detection.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/general_knowledge.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_args.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_fallacy_detection.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mnist_ascii.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/abstract_narrative_understanding.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: abstract_narrative_understanding_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_abstract_narrative_understanding_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_analytic_entailment_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ascii_word_recognition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_ascii_word_recognition_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: authorship_verification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_authorship_verification_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bbq_lite_json_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_bbq_lite_json_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bridging_anaphora_resolution_barqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_bridging_anaphora_resolution_barqa_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: causal_judgment_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_causal_judgment_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cause_and_effect_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cause_and_effect_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/checkmate_in_one.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: checkmate_in_one_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_checkmate_in_one_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cifar10_classification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cifar10_classification_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: code_line_description_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_code_line_description_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/codenames.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: codenames_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_codenames_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/color.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: color_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_color_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: contextual_parametric_knowledge_conflicts_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_contextual_parametric_knowledge_conflicts_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crash_blossom.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crash_blossom_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_crash_blossom_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cs_algorithms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cs_algorithms_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_disambiguation_qa_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/disfl_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disfl_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_disfl_qa_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dyck_languages_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dyck_languages_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: elementary_math_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_elementary_math_qa_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_russian_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_english_russian_proverbs_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/entailed_polarity.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_entailed_polarity_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_hindi_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_entailed_polarity_hindi_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: few_shot_nlg_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_few_shot_nlg_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/figure_of_speech_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: figure_of_speech_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_figure_of_speech_detection_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: formal_fallacies_syllogisms_negation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_formal_fallacies_syllogisms_negation_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gender_inclusive_sentences_german_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gender_inclusive_sentences_german_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/general_knowledge.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: general_knowledge_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_general_knowledge_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gre_reading_comprehension_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gre_reading_comprehension_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hindu_knowledge_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hindu_knowledge_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: human_organs_senses_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_human_organs_senses_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hyperbaton_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_math_theorems_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_identify_math_theorems_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_odd_metaphor_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_identify_odd_metaphor_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intersect_geometry_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intersect_geometry_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: linguistic_mappings_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_linguistic_mappings_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logic_grid_puzzle_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logic_grid_puzzle_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_args.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_args_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_args_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_deduction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_deduction_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_fallacy_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_fallacy_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_fallacy_detection_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_sequence_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_sequence_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mathematical_induction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mathematical_induction_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/mnist_ascii.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mnist_ascii_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mnist_ascii_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_recommendation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_movie_recommendation_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: nonsense_words_grammar_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_nonsense_words_grammar_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: novel_concepts_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_novel_concepts_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: odd_one_out_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_odd_one_out_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/paragraph_segmentation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: paragraph_segmentation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_paragraph_segmentation_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: penguins_in_a_table_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_penguins_in_a_table_generate_until

lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: phrase_relatedness_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_phrase_relatedness_generate_until
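Every added file follows the same four-line pattern: a `# Generated by utils.py` marker, a `dataset_name` pointing at the `*_zero_shot` BIG-bench config, an `include` of the shared `../generate_until_template_yaml`, and a `task` name. The sketch below shows how a generator script could emit these stubs; it is a minimal illustration inferred from the diff contents, not the actual `utils.py` from lm-evaluation-harness, and the `SUBTASKS` list and function name are assumptions.

```python
# Hypothetical generator sketch: writes one YAML stub per BIG-bench
# subtask, each deferring shared settings to generate_until_template_yaml.
from pathlib import Path

# Assumed list; the real script presumably enumerates all subtasks.
SUBTASKS = [
    "abstract_narrative_understanding",
    "analytic_entailment",
    "ascii_word_recognition",
    # ... remaining BIG-bench subtask names
]

def write_stubs(out_dir: str = "generate_until") -> None:
    out = Path(out_dir)
    out.mkdir(parents=True, exist_ok=True)
    for name in SUBTASKS:
        stub = (
            "# Generated by utils.py\n"
            f"dataset_name: {name}_zero_shot\n"
            "include: ../generate_until_template_yaml\n"
            f"task: bigbench_{name}_generate_until\n"
        )
        # One stub file per subtask, matching the paths in this diff.
        (out / f"{name}.yaml").write_text(stub)

if __name__ == "__main__":
    write_stubs()
```

Keeping each per-task file to a `dataset_name`/`task` pair plus an `include` means any change to prompting or metrics only needs to touch the shared template.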