diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml new file mode 100644 index 0000000000000000000000000000000000000000..831361984ab186fb29835595db2853469ee0f7e6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/anachronisms.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: anachronisms_zero_shot +include: ../generate_until_template_yaml +task: bigbench_anachronisms_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5cc6550a6075a991bce4826c95188e0c7b3d2a94 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/analogical_similarity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: analogical_similarity_zero_shot +include: ../generate_until_template_yaml +task: bigbench_analogical_similarity_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d8802c1c85d3dd4ae02f04a86982b08be6e214e3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: auto_debugging_zero_shot +include: ../generate_until_template_yaml +task: bigbench_auto_debugging_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml new file mode 100644 index 0000000000000000000000000000000000000000..872e809b8637380fd3eafa0bb4a5a57e7ce6335c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/chinese_remainder_theorem.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: chinese_remainder_theorem_zero_shot +include: ../generate_until_template_yaml +task: bigbench_chinese_remainder_theorem_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml new file mode 100644 index 0000000000000000000000000000000000000000..09a8b9f407385400214d48478a6e2cf9b24a70cc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: common_morpheme_zero_shot +include: ../generate_until_template_yaml +task: bigbench_common_morpheme_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b36c1d5c2a2ac9a6d6a0b633c2777135122610b0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: conceptual_combinations_zero_shot +include: ../generate_until_template_yaml +task: bigbench_conceptual_combinations_generate_until diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ec9cccc8c72e887e047a5871c496d68498f7f576 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/conlang_translation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: conlang_translation_zero_shot +include: ../generate_until_template_yaml +task: bigbench_conlang_translation_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a65d1c334295ee8f3370305a7f563dd21c476680 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: crass_ai_zero_shot +include: ../generate_until_template_yaml +task: bigbench_crass_ai_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5fc59ee24bb455dff7cb77cfdb73ad11b7f1f572 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryobiology_spanish_zero_shot +include: ../generate_until_template_yaml +task: bigbench_cryobiology_spanish_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3393c36805d6b29cd3d59481b11c8b8dd45e2910 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/cryptonite.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryptonite_zero_shot +include: ../generate_until_template_yaml +task: bigbench_cryptonite_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f13ec2a4a0fc2dd244aefb53cb7e409fdb2bdad1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/dark_humor_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: dark_humor_detection_zero_shot +include: ../generate_until_template_yaml +task: bigbench_dark_humor_detection_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0fdca6abd643776f45e4bd7163fd0fbe01f6087f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: date_understanding_zero_shot +include: ../generate_until_template_yaml +task: bigbench_date_understanding_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..30182d9d1f884411dff255d208fd5c999209b003 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: discourse_marker_prediction_zero_shot +include: ../generate_until_template_yaml +task: bigbench_discourse_marker_prediction_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml new file mode 100644 index 0000000000000000000000000000000000000000..af958389cb784df75e9a82573087903642cef6ab --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: emoji_movie_zero_shot +include: ../generate_until_template_yaml +task: bigbench_emoji_movie_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b26cbee762ba972b44d9404f421e975ee285487 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: empirical_judgments_zero_shot +include: ../generate_until_template_yaml +task: bigbench_empirical_judgments_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cdd014d9c64b37666cc54c9b7097941fcb2a54a2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: english_proverbs_zero_shot +include: ../generate_until_template_yaml +task: bigbench_english_proverbs_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f080bcf3988c2dcbcee08bae53025f6ce18ece13 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: epistemic_reasoning_zero_shot +include: ../generate_until_template_yaml +task: bigbench_epistemic_reasoning_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b640b9430ad8a11758152c63ad0c77497fd16d50 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/evaluating_information_essentiality.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: evaluating_information_essentiality_zero_shot +include: ../generate_until_template_yaml +task: bigbench_evaluating_information_essentiality_generate_until diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml new file mode 100644 index 0000000000000000000000000000000000000000..62dd5197439239a86c7d044d28fd936226481a02 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: fact_checker_zero_shot +include: ../generate_until_template_yaml +task: bigbench_fact_checker_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b94f4c05b924d9ca001addc50ba76a03fc3a32f7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/fantasy_reasoning.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: fantasy_reasoning_zero_shot +include: ../generate_until_template_yaml +task: bigbench_fantasy_reasoning_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d586c3cb372b95a43243c59e6e7abc04f61f6513 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/geometric_shapes.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: geometric_shapes_zero_shot +include: ../generate_until_template_yaml +task: bigbench_geometric_shapes_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22748246128e774650563a8652a94d57b0e5a338 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/goal_step_wikihow.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: goal_step_wikihow_zero_shot +include: ../generate_until_template_yaml +task: bigbench_goal_step_wikihow_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..463450b0cb275e2ea6391eb5bed44782ad3265da --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hindi_question_answering_zero_shot +include: ../generate_until_template_yaml +task: bigbench_hindi_question_answering_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7ad63dda3e7cd433a29e34282ceaec71f188fa76 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/hinglish_toxicity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hinglish_toxicity_zero_shot +include: ../generate_until_template_yaml +task: bigbench_hinglish_toxicity_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicatures.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicatures.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf19c32aad8960cc8427d7269927fd67ae732f14 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicatures.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: implicatures_zero_shot +include: ../generate_until_template_yaml +task: bigbench_implicatures_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml new file mode 100644 index 0000000000000000000000000000000000000000..361f0435ef63a75bd2413100c434841f206ee5f8 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/implicit_relations.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: implicit_relations_zero_shot +include: ../generate_until_template_yaml +task: bigbench_implicit_relations_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0583a17e4b456ca0d6334353fc16d8e89e95b962 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: intent_recognition_zero_shot +include: ../generate_until_template_yaml +task: bigbench_intent_recognition_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..71ad3b9d4a7f980529e64ce4ebba38a4db026f05 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: international_phonetic_alphabet_transliterate_zero_shot +include: ../generate_until_template_yaml +task: bigbench_international_phonetic_alphabet_transliterate_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml new file mode 100644 index 0000000000000000000000000000000000000000..556c5a62a7e31b56732dd158efca9111fa2b8f60 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: irony_identification_zero_shot +include: ../generate_until_template_yaml +task: bigbench_irony_identification_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kannada.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kannada.yaml new file mode 100644 index 0000000000000000000000000000000000000000..047e7049b4a5adb0f4a16d31f0018ece6be0e72e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/kannada.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: kannada_zero_shot +include: ../generate_until_template_yaml +task: bigbench_kannada_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ea697d1f7664866050ecbd0615ea3e957a13602 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/key_value_maps.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: key_value_maps_zero_shot +include: ../generate_until_template_yaml +task: bigbench_key_value_maps_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1a8bb0640198dd3a1e288e80804a9dadeb2c806 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/known_unknowns.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: known_unknowns_zero_shot +include: ../generate_until_template_yaml +task: bigbench_known_unknowns_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_identification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_identification.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9cb7b27408b9a82c308ebac33b89e799df0763a0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/language_identification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: language_identification_zero_shot +include: ../generate_until_template_yaml +task: bigbench_language_identification_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml new file mode 100644 index 0000000000000000000000000000000000000000..df8b729a6bad1ee9c30bd57f659f9f61d0e840e4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/linguistics_puzzles.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: linguistics_puzzles_zero_shot +include: ../generate_until_template_yaml +task: bigbench_linguistics_puzzles_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1a162eae1b4226ba93f7dce1f0d8c46800512f9e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: matrixshapes_zero_shot +include: ../generate_until_template_yaml +task: bigbench_matrixshapes_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml new file mode 100644 index 0000000000000000000000000000000000000000..28922b3f1b498e073db5835c94bf3ee03fa07ebd --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_boolean.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: metaphor_boolean_zero_shot +include: ../generate_until_template_yaml +task: bigbench_metaphor_boolean_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml new file mode 100644 
index 0000000000000000000000000000000000000000..029a4c0a073ccaefc8975ae37937319b27f1e7ee --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: metaphor_understanding_zero_shot +include: ../generate_until_template_yaml +task: bigbench_metaphor_understanding_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d453fd941b840482073260cb55a095d4534baeeb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/minute_mysteries_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: minute_mysteries_qa_zero_shot +include: ../generate_until_template_yaml +task: bigbench_minute_mysteries_qa_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..edbb2b34b8cceb119a191942fb617cf99367cd40 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: modified_arithmetic_zero_shot +include: ../generate_until_template_yaml +task: bigbench_modified_arithmetic_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml new file mode 100644 index 0000000000000000000000000000000000000000..27cc6228f092b33652b0adcc5597fe71365128b0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/movie_dialog_same_or_different.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: movie_dialog_same_or_different_zero_shot +include: ../generate_until_template_yaml +task: bigbench_movie_dialog_same_or_different_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9b77c895577fa3894b4f6646702c7e237436864b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: natural_instructions_zero_shot +include: ../generate_until_template_yaml +task: bigbench_natural_instructions_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/navigate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/navigate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..549ed37058fb3c2a9db7eb9d0d6e6ba4c2868983 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/navigate.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: navigate_zero_shot +include: ../generate_until_template_yaml +task: bigbench_navigate_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/object_counting.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/object_counting.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..a9fc956996d41418c40d23c255ba2abfd0a831b1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/object_counting.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: object_counting_zero_shot +include: ../generate_until_template_yaml +task: bigbench_object_counting_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/operators.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/operators.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d6aaa8b61799f665645249c19d833593576709c6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/operators.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: operators_zero_shot +include: ../generate_until_template_yaml +task: bigbench_operators_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..552f8c6068fde183ab744a1e322c41c8744070e0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/parsinlu_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: parsinlu_qa_zero_shot +include: ../generate_until_template_yaml +task: bigbench_parsinlu_qa_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c5c96cec606f6ba3e749c970b20f71d9ed200799 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/periodic_elements.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: periodic_elements_zero_shot +include: ../generate_until_template_yaml +task: bigbench_periodic_elements_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7e3aa0f47f46229e09b8d9bee0805eb4bbf5b671 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/persian_idioms.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: persian_idioms_zero_shot +include: ../generate_until_template_yaml +task: bigbench_persian_idioms_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ecef1581c907281e920a08651434a15313f0dc39 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physical_intuition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: physical_intuition_zero_shot +include: ../generate_until_template_yaml +task: bigbench_physical_intuition_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39bc786bae05862d66b4f358313feee70ee8d14a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/physics.yaml @@ -0,0 +1,4 @@ +# Generated by 
utils.py +dataset_name: physics_zero_shot +include: ../generate_until_template_yaml +task: bigbench_physics_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/question_selection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/question_selection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8e2321a8db770ea9e20761f5b7b117cbdeb7b583 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/question_selection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: question_selection_zero_shot +include: ../generate_until_template_yaml +task: bigbench_question_selection_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd8cd4d8563d4be2b92e18fcd48adc13d6c06f9e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/repeat_copy_logic.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: repeat_copy_logic_zero_shot +include: ../generate_until_template_yaml +task: bigbench_repeat_copy_logic_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml new file mode 100644 index 0000000000000000000000000000000000000000..745cdb3244845caa9914fae7073b29f64f9773bb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: riddle_sense_zero_shot +include: ../generate_until_template_yaml +task: bigbench_riddle_sense_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4968e441daa4b119bcaf0e5ae5f33d2acfd5a4a6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/salient_translation_error_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: salient_translation_error_detection_zero_shot +include: ../generate_until_template_yaml +task: bigbench_salient_translation_error_detection_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml new file mode 100644 index 0000000000000000000000000000000000000000..122f66e7da0ec45e780fbb727809452c6ef64036 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/scientific_press_release.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: scientific_press_release_zero_shot +include: ../generate_until_template_yaml +task: bigbench_scientific_press_release_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml new file mode 100644 index 0000000000000000000000000000000000000000..39307d92fc3d5f78037102153cfd4e9cc0bb4b48 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/semantic_parsing_spider.yaml @@ -0,0 +1,4 @@ +# Generated by 
utils.py +dataset_name: semantic_parsing_spider_zero_shot +include: ../generate_until_template_yaml +task: bigbench_semantic_parsing_spider_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..263b453fac68a15afa2b8d4ac14328fe6e096124 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/sentence_ambiguity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: sentence_ambiguity_zero_shot +include: ../generate_until_template_yaml +task: bigbench_sentence_ambiguity_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6eb9cd87e782bdb6aa857d2550c515a2db9382fe --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simp_turing_concept.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simp_turing_concept_zero_shot +include: ../generate_until_template_yaml +task: bigbench_simp_turing_concept_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3ff5a1b1a8f51346978d03fd34cb6ad780f85f0b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_json_zero_shot +include: ../generate_until_template_yaml +task: bigbench_simple_arithmetic_json_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml new file mode 100644 index 0000000000000000000000000000000000000000..393ec8843a009267ea2515fe21105b50fed672e2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_multiple_targets_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_multiple_targets_json_zero_shot +include: ../generate_until_template_yaml +task: bigbench_simple_arithmetic_multiple_targets_json_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d3310fa2126ea3c2601e4e4e16cdf22df06e8c4f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/simple_text_editing.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_text_editing_zero_shot +include: ../generate_until_template_yaml +task: bigbench_simple_text_editing_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5405d92e2eea8199985004288270fc1c50bce96 --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/strange_stories.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: strange_stories_zero_shot +include: ../generate_until_template_yaml +task: bigbench_strange_stories_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml new file mode 100644 index 0000000000000000000000000000000000000000..414dc51b137fb55037b5b9bc109bba116ee72d34 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/temporal_sequences.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: temporal_sequences_zero_shot +include: ../generate_until_template_yaml +task: bigbench_temporal_sequences_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/timedial.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/timedial.yaml new file mode 100644 index 0000000000000000000000000000000000000000..854d8642b93197453e8e2d5242c8c1aeb30b519f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/timedial.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: timedial_zero_shot +include: ../generate_until_template_yaml +task: bigbench_timedial_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bc05c377785c652d603e275b6e9df7608eeef5fc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/which_wiki_edit.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: which_wiki_edit_zero_shot +include: ../generate_until_template_yaml +task: bigbench_which_wiki_edit_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5632a79c639f23b9635a810176a5ea10343c506f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until/word_unscrambling.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: word_unscrambling_zero_shot +include: ../generate_until_template_yaml +task: bigbench_word_unscrambling_generate_until diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/README.md b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8b2a22edd49172897a42afcfe3b64974204618ca --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/README.md @@ -0,0 +1,94 @@ +# FrenchBench + +### Paper + +FrenchBench is a benchmark for evaluating French language models, introduced in the paper +[CroissantLLM: A Truly Bilingual French-English Language Model](https://arxiv.org/abs/2402.00786). +It is a collection of tasks that evaluate the ability of a language model to understand and generate French text. +This benchmark is constructed from both openly available datasets and newly released, manually annotated data. + +### Citation + +```bibtex +@misc{faysse2024croissantllm, + title={CroissantLLM: A Truly Bilingual French-English Language Model}, + author={Manuel Faysse and Patrick Fernandes and Nuno M. Guerreiro and António Loison and Duarte M.
Alves and Caio Corro and Nicolas Boizard and João Alves and Ricardo Rei and Pedro H. Martins and Antoni Bigata Casademunt and François Yvon and André F. T. Martins and Gautier Viaud and Céline Hudelot and Pierre Colombo}, + year={2024}, + eprint={2402.00786}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +- `french_bench`: All tasks (non-perplexity based) +- `french_bench_gen`: All official generative tasks +- `french_bench_mc`: All official multiple-choice tasks +- `french_bench_perplexity`: All perplexity-based tasks (0-shot is recommended) +- `french_bench_extra`: All extra tasks + +#### Tasks + + +The following tasks evaluate models on the FrenchBench dataset using various scoring methods. + - french_bench_boolqa + - french_bench_fquadv2 + - french_bench_fquadv2_bool + - french_bench_fquadv2_genq + - french_bench_fquadv2_hasAns + - french_bench_topic_based_nli + - french_bench_multifquad + - french_bench_grammar + - french_bench_vocab + - french_bench_reading_comp + - french_bench_xnli (modified XNLI) + - french_bench_orangesum_abstract + - french_bench_orangesum_title + - french_bench_trivia + - french_bench_hellaswag + - french_bench_arc_challenge + +FrenchBench also includes tasks from other benchmarks: +- `belebele_fra_Latn`: Belebele French +- `wmt14-en-fr`: WMT14 English-French +- `wmt14-fr-en`: WMT14 French-English + +# Not intended for few-shot use +- `crows_pairs_french`: Crows Pairs French +- `french_bench_opus_perplexity`: Opus Perplexity + + +### Usage + +```bash +# openai +lm_eval --model openai-completions --model_args engine=text-davinci-003 --tasks french_bench --limit 100 --num_fewshot 3 --batch_size auto --output_path data/french_bench/davinci-003/results_french_bench_3shot.json +lm_eval --model openai-completions --model_args engine=text-davinci-003 --tasks french_bench_opus_perplexity,crows_pairs_french --limit 100 --batch_size auto --output_path data/french_bench/davinci-003/results_french_bench2_0shot.json + + +lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench --device cuda:0 --limit 100 --num_fewshot 3 --batch_size 8 --output_path data/french_bench/gpt2/results_french_bench_3shot.json +lm_eval --model hf --model_args pretrained=gpt2 --tasks french_bench_opus_perplexity,crows_pairs_french --device cuda:0 --limit 100 --batch_size auto --output_path data/french_bench/gpt2/results_french_bench2_0shot.json + +lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks french_bench --device cuda:0 --limit 100 --num_fewshot 3 --batch_size 4 --output_path data/french_bench/llama-2-7b-hf/results_french_bench_3shot.json +lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf --tasks french_bench_opus_perplexity,crows_pairs_french --device cuda:0 --limit 100 --batch_size auto --output_path data/french_bench/llama-2-7b-hf/results_french_bench2_0shot.json +``` + +HF and Accelerate options can be added when loading a model: +```bash + accelerate launch -m lm_eval --model hf --model_args pretrained=meta-llama/Llama-2-7b-hf,dtype="float16" --tasks french_bench +``` + +### Checklist + +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation?
+ * [x] Yes, original implementation contributed by author of the benchmark + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [x] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5ffdb194a40ee267c7e7a9940351022d4692a19e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2.yaml @@ -0,0 +1,29 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_extra +description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'." +task: french_bench_fquadv2 +dataset_path: manu/fquad2_test +output_type: generate_until +validation_split: valid +doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:" +doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}" +target_delimiter: " " +should_decontaminate: true +doc_to_decontamination_query: context +generation_kwargs: + until: + - "\n" +# filter_list: +# - name: remove_whitespace +# filter: +# - function: remove_whitespace +# - function: take_first +metric_list: + - metric: !function utils.exact + aggregation: mean + higher_is_better: true + - metric: !function utils.f1 + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd1c4684db873405961833907101a872e8d6f8fa --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2_genq.yaml @@ -0,0 +1,31 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_gen +description: "D'après l'information dans le contexte donné, quelle question a été posée pour obtenir la réponse donnée ?" 
+task: french_bench_fquadv2_genq +dataset_path: manu/fquad2_test +output_type: generate_until +validation_split: valid_hasAns +test_split: test_hasAns +fewshot_split: valid_hasAns +doc_to_text: "\nContexte: {{context}}\n\nRéponse: {% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}\n\nQuestion:" +doc_to_target: "{{question}}" +target_delimiter: " " +should_decontaminate: true +doc_to_decontamination_query: question +generation_kwargs: + until: + - "\n" +# filter_list: +# - name: remove_whitespace +# filter: +# - function: remove_whitespace +# - function: take_first +metric_list: + - metric: !function utils.rouge1 + higher_is_better: true + aggregation: !function utils.rouge1_agg + - metric: !function utils.f1 + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml new file mode 100644 index 0000000000000000000000000000000000000000..37c02af358e1d26f2823440ea23f8ae7770d87a2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_fquadv2_hasAns.yaml @@ -0,0 +1,34 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_gen +description: "D'après l'information dans le contexte donné, donne la réponse à la question en citant quelques mots du contexte. Si il est impossible de répondre avec les informations du contexte, répond 'Impossible'." +task: french_bench_fquadv2_hasAns +dataset_path: manu/fquad2_test +output_type: generate_until +validation_split: valid_hasAns +test_split: test_hasAns +fewshot_split: valid_hasAns +doc_to_text: "\nContexte: {{context}}\n\nQuestion: {{question}}\n\nRéponse:" +doc_to_target: "{% if answers.text| length > 0 %}{{answers.text[0]}}{% else %}{{['Impossible']}}{% endif %}" +target_delimiter: " " +should_decontaminate: true +doc_to_decontamination_query: context +generation_kwargs: + until: + - "\n" +# filter_list: +# - name: remove_whitespace +# filter: +# - function: remove_whitespace +# - function: take_first +metric_list: + - metric: !function utils.exact + aggregation: mean + higher_is_better: true + - metric: !function utils.f1 + aggregation: mean + higher_is_better: true + - metric: !function utils.rouge1 + higher_is_better: true + aggregation: !function utils.rouge1_agg diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_grammar.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_grammar.yaml new file mode 100644 index 0000000000000000000000000000000000000000..45052ccc04134a7a194a24b19fb3d621345e1f9d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_grammar.yaml @@ -0,0 +1,20 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_mc +description: "Répond au mieux en complétant la question avec une des réponses proposées." 
+dataset_path: manu/french-bench-grammar-vocab-reading +output_type: multiple_choice +validation_split: Grammar +fewshot_split: Grammar +test_split: Grammar +#doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:" +#doc_to_choice: ["A", "B", "C", "D"] +doc_to_text: "La phrase suivante est correcte grammaticalement:\n" +doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}" +doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}' +task: french_bench_grammar +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d8c8abd8c1772193ca3d64a33edeb36b4fefd66 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_reading_comp.yaml @@ -0,0 +1,22 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_extra +# description: "Répond au mieux en complétant la question avec une des réponses proposées." +dataset_path: manu/french-bench-grammar-vocab-reading +output_type: multiple_choice +validation_split: Reading +fewshot_split: Reading +test_split: Reading +# doc_to_text: "Context: {{context}}\nQuestion: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:" +# doc_to_choice: "{{['A: '+answerA, 'B: '+answerB, 'C: '+answerC, 'D: '+answerD]}}" +doc_to_text: "Context: {{context}}\n\n" +doc_to_choice: "{{[question.replace('<...>', answerA) if '<...>' in question else question + ' ' +answerA, question.replace('<...>', answerB) if '<...>' in question else question + ' ' + answerB, question.replace('<...>', answerC) if '<...>' in question else question + ' ' + answerC, question.replace('<...>', answerD) if '<...>' in question else question + ' ' + answerD]}}" +doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}' +# doc_to_choice: "{{['A: '+answerA, 'B: '+answerB, 'C: '+answerC, 'D: '+answerD]}}" +# doc_to_target: answer +task: french_bench_reading_comp +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_trivia.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_trivia.yaml new file mode 100644 index 0000000000000000000000000000000000000000..525fb781bcc716a9cd9822793485f5b0fc2fba6f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_trivia.yaml @@ -0,0 +1,36 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_gen +task: french_bench_trivia +dataset_path: manu/french-trivia +output_type: generate_until +validation_split: train +test_split: train +fewshot_split: train +doc_to_text: "{{Question}}\nAnswer:" +doc_to_target: "{{Answer}}" +target_delimiter: " " +should_decontaminate: true +doc_to_decontamination_query: Question +generation_kwargs: + until: + - "\n" +# filter_list: +# - name: remove_whitespace +# filter: +# - function: remove_whitespace +# - function: take_first +metric_list: + - metric: !function utils.exact + aggregation: mean + higher_is_better: true + - metric: !function utils.f1 + aggregation: mean + higher_is_better: true 
+ - metric: !function utils.rouge1 + higher_is_better: true + aggregation: !function utils.rouge1_agg + - metric: !function utils.is_included + higher_is_better: true + aggregation: mean diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_vocab.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_vocab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1995c91c2515416598721bede2325ce0843d37cc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_vocab.yaml @@ -0,0 +1,20 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_mc +# description: "Répond au mieux en complétant la question avec une des réponses proposées." +dataset_path: manu/french-bench-grammar-vocab-reading +output_type: multiple_choice +validation_split: Vocabulary +fewshot_split: Vocabulary +test_split: Vocabulary +# doc_to_text: "Question: {{question.strip()}}\nA: {{answerA}}\nB: {{answerB}}\nC: {{answerC}}\nD: {{answerD}}\nRéponse:" +# doc_to_choice: ["A", "B", "C", "D"] +doc_to_text: "La phrase suivante est logique sémantiquement:\n" +doc_to_choice: "{{[question.replace('<...>', answerA), question.replace('<...>', answerB), question.replace('<...>', answerC), question.replace('<...>', answerD)]}}" +doc_to_target: '{{["answerA", "answerB", "answerC", "answerD"].index("answer" + answer)}}' +task: french_bench_vocab +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c4b04fe0e6214428360a1b1955426f8675909efc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_wikitext_fr.yaml @@ -0,0 +1,25 @@ +group: + - french_bench_perplexity +task: french_bench_wikitext_fr +dataset_path: asi/wikitext_fr +dataset_name: wikitext-35 +output_type: loglikelihood_rolling +training_split: train +validation_split: validation +test_split: test +num_fewshot: 0 +doc_to_text: "" +doc_to_target: !function preprocess_wikitext.wikitext_detokenizer +process_results: !function preprocess_wikitext.process_results +should_decontaminate: true +doc_to_decontamination_query: "{{paragraph}}" +metric_list: + - metric: word_perplexity + aggregation: weighted_perplexity + higher_is_better: false + - metric: byte_perplexity + aggregation: weighted_perplexity + higher_is_better: false + - metric: bits_per_byte + aggregation: bits_per_byte + higher_is_better: false diff --git a/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_xnli.yaml b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_xnli.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7a527e4cf9d8ce6a1ff8f14a1cf03a471d06b14c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/french_bench/french_bench_xnli.yaml @@ -0,0 +1,21 @@ +include: "_default_template_yaml" +group: + - french_bench + - french_bench_extra +description: "La prémisse et l'hypothèse sont elles en accord, neutres en elles, ou en contradiction ?" +dataset_path: xnli +dataset_name: fr +output_type: multiple_choice +validation_split: validation +fewshot_split: validation +test_split: test +# doc_to_text: "\nPrémisse: {{premise}}\n\nHypothèse: {{hypothesis}}\n\nLa prémisse et l'hypothèse sont:\nA. En accord\nB. Neutre\nC. 
En contradiction\nRéponse:" +# doc_to_choice: "{{['A: En accord', 'B: Neutre', 'C: En contradiction']}}" +doc_to_text: "\nPrémisse: {{premise}}\n\nHypothèse: {{hypothesis}}\n\nLa prémisse et l'hypothèse sont" +doc_to_choice: "{{['en accord', 'neutres entre elles', 'en contradiction']}}" +doc_to_target: label +task: french_bench_xnli +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true
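A note on the cloze-style templates in the french_bench_grammar, french_bench_vocab, and french_bench_reading_comp configs above: `doc_to_choice` substitutes each candidate answer into the `<...>` placeholder of the question, so the harness scores four complete sentences rather than bare option letters, and `doc_to_target` recovers the index of the gold option from the `answer` letter. A minimal worked example, using an invented document for illustration:

```python
# Worked example (invented data) of the cloze-style doc_to_choice /
# doc_to_target templates used by the grammar/vocab/reading configs above.
doc = {
    "question": "Ils <...> au cinéma hier soir.",
    "answerA": "vont",
    "answerB": "sont allés",
    "answerC": "allaient aller",
    "answerD": "iront",
    "answer": "B",
}

# doc_to_choice: fill the <...> blank with each candidate answer.
choices = [
    doc["question"].replace("<...>", doc[key])
    for key in ("answerA", "answerB", "answerC", "answerD")
]

# doc_to_target: map the answer letter to the index of the gold choice.
target = ["answerA", "answerB", "answerC", "answerD"].index("answer" + doc["answer"])

print(choices[target])  # Ils sont allés au cinéma hier soir.
```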
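The generative configs (french_bench_fquadv2, french_bench_trivia, etc.) pull their scoring functions from a sibling utils.py via the `!function` YAML tag, e.g. `!function utils.exact` and `!function utils.f1`, which the harness resolves to plain Python callables and averages per instance (`aggregation: mean`). Below is a rough sketch of what SQuAD-style metrics of this kind look like; the function names match the configs, but the bodies, normalization details, and call signatures are assumptions, not the repository's actual implementation.

```python
# Minimal sketch of SQuAD-style metrics like those referenced by the
# french_bench YAMLs (utils.exact, utils.f1, utils.is_included).
# Signatures and normalization are assumptions for illustration only.
import string
from collections import Counter


def _normalize(text: str) -> str:
    """Lowercase, drop punctuation, and collapse whitespace."""
    text = text.lower()
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    return " ".join(text.split())


def exact(references: list, predictions: list, **kwargs) -> float:
    """1.0 iff the normalized prediction equals the normalized reference."""
    return float(_normalize(references[0]) == _normalize(predictions[0]))


def is_included(references: list, predictions: list, **kwargs) -> float:
    """1.0 iff the normalized reference appears inside the prediction."""
    return float(_normalize(references[0]) in _normalize(predictions[0]))


def f1(references: list, predictions: list, **kwargs) -> float:
    """Token-level F1 between prediction and reference."""
    gold = _normalize(references[0]).split()
    pred = _normalize(predictions[0]).split()
    common = Counter(gold) & Counter(pred)  # per-token minimum counts
    overlap = sum(common.values())
    if overlap == 0:
        return 0.0
    precision = overlap / len(pred)
    recall = overlap / len(gold)
    return 2 * precision * recall / (precision + recall)
```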
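Lastly, every bigbench config in this diff carries a `# Generated by utils.py` header: the four-line stubs are emitted by a script that loops over task names rather than written by hand. A hypothetical sketch of such a generator is below; the helper name and the abridged task list are illustrative, not the harness's actual utils.py.

```python
# Hypothetical sketch of a generator producing the four-line bigbench
# YAML stubs above; names and the task list are illustrative only.
import os

TASKS = ["anachronisms", "analogical_similarity", "auto_debugging"]  # abridged


def write_generate_until_yaml(task: str, out_dir: str = "generate_until") -> None:
    """Write one task stub matching the pattern seen in this diff."""
    body = (
        "# Generated by utils.py\n"
        f"dataset_name: {task}_zero_shot\n"
        "include: ../generate_until_template_yaml\n"
        f"task: bigbench_{task}_generate_until\n"
    )
    os.makedirs(out_dir, exist_ok=True)
    with open(os.path.join(out_dir, f"{task}.yaml"), "w", encoding="utf-8") as f:
        f.write(body)


if __name__ == "__main__":
    for t in TASKS:
        write_generate_until_yaml(t)
```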