Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_identification.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/operators.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/presuppositions_as_nli.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/reasoning_about_colored_objects.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_support.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/what_is_the_tao.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml +4 -0
- lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml +4 -0
lm-evaluation/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_anachronisms_generate_until
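Every file in this diff follows the same four-line pattern: it names the dataset config, pulls shared settings from a template via include, and registers a task name. The shared ../generate_until_template_yaml itself falls outside this 50-file view; the sketch below shows the kind of fields such an lm-evaluation-harness template typically supplies (all paths and values here are illustrative assumptions, not the actual template contents):

# Hypothetical sketch of ../generate_until_template_yaml (not part of this diff).
# Field names follow the lm-evaluation-harness task schema; values are assumed.
dataset_path: hails/bigbench      # assumed HF dataset backing these tasks
output_type: generate_until       # free-form generation scored against the targets
doc_to_text: "{{inputs}}"         # prompt drawn from each example's inputs field
doc_to_target: "{{targets}}"      # gold answers drawn from the targets field
generation_kwargs:
  until: ["\n"]                   # stop at the first newline
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true

With those defaults shared, each per-task file only needs to override dataset_name and task, which is exactly what the generated files below do.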
lm-evaluation/lm_eval/tasks/bigbench/generate_until/authorship_verification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: authorship_verification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_authorship_verification_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_auto_debugging_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conlang_translation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_conlang_translation_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryptonite_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cryptonite_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dark_humor_detection_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_dark_humor_detection_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/emojis_emotion_prediction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emojis_emotion_prediction_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_evaluating_information_essentiality_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fact_checker_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fact_checker_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fantasy_reasoning_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fantasy_reasoning_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: formal_fallacies_syllogisms_negation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_formal_fallacies_syllogisms_negation_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/gender_inclusive_sentences_german.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gender_inclusive_sentences_german_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gender_inclusive_sentences_german_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: goal_step_wikihow_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_goal_step_wikihow_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/hyperbaton.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_hyperbaton_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/intersect_geometry.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intersect_geometry_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intersect_geometry_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_key_value_maps_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/language_identification.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_language_identification_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/logic_grid_puzzle.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logic_grid_puzzle_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logic_grid_puzzle_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/misconceptions_russian.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_russian_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_russian_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: modified_arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_modified_arithmetic_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/nonsense_words_grammar.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: nonsense_words_grammar_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_nonsense_words_grammar_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/novel_concepts.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: novel_concepts_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_novel_concepts_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/operators.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: operators_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_operators_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: parsinlu_qa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_parsinlu_qa_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: periodic_elements_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_periodic_elements_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/phrase_relatedness.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: phrase_relatedness_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_phrase_relatedness_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/physics.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_physics_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/presuppositions_as_nli.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: presuppositions_as_nli_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_presuppositions_as_nli_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/reasoning_about_colored_objects.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: reasoning_about_colored_objects_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_reasoning_about_colored_objects_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: repeat_copy_logic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_repeat_copy_logic_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_iqa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_iqa_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_iqa_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/social_support.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_support_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_social_support_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: temporal_sequences_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_temporal_sequences_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/what_is_the_tao.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: what_is_the_tao_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_what_is_the_tao_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_unscrambling_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_word_unscrambling_generate_until
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_anachronisms_multiple_choice
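The multiple_choice variants reuse the same dataset configs but include a different shared template. Again, ../multiple_choice_template_yaml is outside this 50-file view; the following is a sketch under the same assumptions, not the actual file contents:

# Hypothetical sketch of ../multiple_choice_template_yaml (not part of this diff).
dataset_path: hails/bigbench                 # assumed; same dataset as above
output_type: multiple_choice                 # rank answer options by log-likelihood
doc_to_text: "{{inputs}}"
doc_to_choice: "{{multiple_choice_targets}}" # candidate answers for each example
doc_to_target: "{{multiple_choice_scores.index(1)}}"  # index of the correct option
metric_list:
  - metric: acc
    higher_is_better: true

Either variant can then be selected by its registered task name at evaluation time, e.g. lm_eval --tasks bigbench_anachronisms_multiple_choice with the standard lm-evaluation-harness CLI.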
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_analytic_entailment_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/arithmetic.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: arithmetic_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_arithmetic_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/auto_debugging.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_auto_debugging_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cryobiology_spanish_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cryobiology_spanish_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cs_algorithms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cs_algorithms_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/date_understanding.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: date_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_date_understanding_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disambiguation_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_disambiguation_qa_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: discourse_marker_prediction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_discourse_marker_prediction_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: dyck_languages_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_dyck_languages_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/evaluating_information_essentiality.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: evaluating_information_essentiality_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_evaluating_information_essentiality_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/figure_of_speech_detection.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: figure_of_speech_detection_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_figure_of_speech_detection_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: human_organs_senses_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_human_organs_senses_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/hyperbaton.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: hyperbaton_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_hyperbaton_multiple_choice
lm-evaluation/lm_eval/tasks/bigbench/multiple_choice/implicatures.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicatures_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_implicatures_multiple_choice