diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4caeacd4db2cc2c4378af8227abcca7c98724b9d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: authorship_verification_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_authorship_verification_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/causal_judgement.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/causal_judgement.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e8011772b9cbf14f5b2481ca44ae0310d91fa5eb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/causal_judgement.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: causal_judgment_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_causal_judgement_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9360f759ce40760244ee2bf98206b92a72bd9b67 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: code_line_description_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_code_line_description_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e5a28097c267c8c18388d511fb79696909de0fe2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: conlang_translation_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_conlang_translation_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ac7c1820d4c2ca98c0055c4ed5c4593eb682eb25 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/crass_ai.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: crass_ai_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_crass_ai_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c187505d302f723db6d4c7be0d6c464cce79047c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/cryobiology_spanish.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: cryobiology_spanish_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_cryobiology_spanish_multiple_choice diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3a77ea44766557982463fffeccf0f18c8627c66b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/dark_humor_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: dark_humor_detection_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_dark_humor_detection_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5a18733fb7b5698c2649a57ad883cd3e1436130d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/discourse_marker_prediction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: discourse_marker_prediction_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_discourse_marker_prediction_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml new file mode 100644 index 0000000000000000000000000000000000000000..48d6f32e4504687fd22d6715d935eb404d279a4d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/dyck_languages.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: dyck_languages_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_dyck_languages_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0604d97d834d15a4db5adc57b4d1240cabacbb33 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: emoji_movie_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_emoji_movie_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml new file mode 100644 index 0000000000000000000000000000000000000000..32878c8ba999c2349a10ab1d00a16fe9b4ba1fc6 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: entailed_polarity_hindi_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_entailed_polarity_hindi_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b80acbf1d32fa7f44b092e0fa105a2f180da252 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/geometric_shapes.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: geometric_shapes_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_geometric_shapes_multiple_choice diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6413fb033763cdf18cf351afecae5442c680755f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/goal_step_wikihow.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: goal_step_wikihow_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_goal_step_wikihow_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed1ed278627da24ee1307ad5eee87892ee25797f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hindi_question_answering.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hindi_question_answering_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_hindi_question_answering_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5dac090fd4cfa1d4bcd739a4ec93998305ad19d1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/hinglish_toxicity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: hinglish_toxicity_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_hinglish_toxicity_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fef6d9301484a42a5a4cd26f2df0dd241b0d104 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: human_organs_senses_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_human_organs_senses_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93c4c24487c60205c3176c3c960d3fba770fe7f2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/identify_odd_metaphor.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: identify_odd_metaphor_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_identify_odd_metaphor_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89d7742d5eb11f19f80e59a2afaf1401e74e2169 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_nli.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: international_phonetic_alphabet_nli_zero_shot +include: 
../multiple_choice_template_yaml +task: bigbench_international_phonetic_alphabet_nli_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c8e866e2cc525d0b716a6747a7272dbed252fd8c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/international_phonetic_alphabet_transliterate.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: international_phonetic_alphabet_transliterate_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_international_phonetic_alphabet_transliterate_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a90a82860909a072cbc0b9ff431439d5b9488b94 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: kanji_ascii_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_kanji_ascii_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml new file mode 100644 index 0000000000000000000000000000000000000000..910cec477c3d4f0201dec825585619937125e7ee --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/kannada.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: kannada_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_kannada_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f4f2ca117bae4df904855ef02a97bb2ef837024 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: list_functions_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_list_functions_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml new file mode 100644 index 0000000000000000000000000000000000000000..84f55f644909026aec8cfd9e8cf8321e45bd6255 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_args_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_logical_args_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..592d2afa8b7b78bf25daf47b43188dec090d7406 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_deduction_zero_shot +include: ../multiple_choice_template_yaml 
+task: bigbench_logical_deduction_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6567189438e8418ae49ee05225ad0451bcfaf0a7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: logical_sequence_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_logical_sequence_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4ed0ad3c0d104e2ff2552937294c80c9ec5f79de --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: mathematical_induction_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_mathematical_induction_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..67109c8cbb941013dd106d486f57c7caa0a2cff4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/minute_mysteries_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: minute_mysteries_qa_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_minute_mysteries_qa_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..63d0fcda69e3109695b0a250b00b214f822e1568 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/misconceptions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: misconceptions_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_misconceptions_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3829555221c2cd7ab0359d4f4074c4c00da57adc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/moral_permissibility.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: moral_permissibility_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_moral_permissibility_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4874dd155bbffe0b3e749583d8d989dd548ea537 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: natural_instructions_zero_shot +include: ../multiple_choice_template_yaml +task: 
bigbench_natural_instructions_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..277d843d7cd330a599c3cc33cedd03a40c671786 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: object_counting_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_object_counting_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/operators.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/operators.yaml new file mode 100644 index 0000000000000000000000000000000000000000..951db6f99efd524ca65a7c0349f27e2f9ad45b84 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/operators.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: operators_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_operators_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5fa0eccce97772dbd70dd7c5a77ae1a3fe466c35 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/parsinlu_reading_comprehension.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: parsinlu_reading_comprehension_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_parsinlu_reading_comprehension_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de024e2e7f7caa176955e65cf83989991306e5fc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/penguins_in_a_table.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: penguins_in_a_table_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_penguins_in_a_table_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..089376dd8ee05da574c844f37ad9c1c7a23cd162 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/physical_intuition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: physical_intuition_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_physical_intuition_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d61345feb5c10ef9ded6ca39bf1f8b9505a40f2b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: polish_sequence_labeling_zero_shot +include: ../multiple_choice_template_yaml +task: 
bigbench_polish_sequence_labeling_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3b3dd0d70e0f3cc25bd8be5fbc4b95c9c016c782 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/question_selection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: question_selection_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_question_selection_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml new file mode 100644 index 0000000000000000000000000000000000000000..93434e2c6ddf7618f4537496cb521e18d83cedcf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: riddle_sense_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_riddle_sense_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml new file mode 100644 index 0000000000000000000000000000000000000000..00574b2f53d940acb881e3b80bbb736366199843 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_in_context_sparc.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: semantic_parsing_in_context_sparc_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_semantic_parsing_in_context_sparc_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e79f1ce10d385b187ca5f0ef4516c77dae291b3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: snarks_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_snarks_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b3bd5936ed64c7d0183484c7eef577be9300c54 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/social_support.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: social_support_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_social_support_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml new file mode 100644 index 0000000000000000000000000000000000000000..263793af4254331b0f3c3c20dbe59d1594675983 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: understanding_fables_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_understanding_fables_multiple_choice diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..68614cfddfcbb41a317d8277842e5f2e268dabe1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: unit_interpretation_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_unit_interpretation_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3dbfb0305efb863f6d698d7f3acedadb320d9a63 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: which_wiki_edit_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_which_wiki_edit_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98bc6e4b23a75abd1a4a560260b88a95034e1f0b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: winowhy_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_winowhy_multiple_choice diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_age.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_age.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bf986547722edcae6fb8c7954e26c8321e146b8d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_age.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_age +dataset_name: english +process_docs: !function utils.filter_age diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9830d8140e68e5fb9b48d16e61ed3904e1d5ff06 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_disability +dataset_name: english +process_docs: !function utils.filter_disability diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_nationality.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_nationality.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96ac97baedbaa8bb93b1cf7c8f396976d9b00897 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_nationality.yaml @@ -0,0 +1,4 @@ +include: crows_pairs_english.yaml +task: crows_pairs_english_nationality +dataset_name: english +process_docs: !function utils.filter_nationality diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..7c62882a0a96f60578d681a0dcd174e24317ecee
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml
@@ -0,0 +1,4 @@
+include: crows_pairs_english.yaml
+task: crows_pairs_english_religion
+dataset_name: english
+process_docs: !function utils.filter_religion
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4eb7f0034149f08f30249758c2baff4a8f0164e9
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml
@@ -0,0 +1,3 @@
+include: crows_pairs_english.yaml
+task: crows_pairs_french
+dataset_name: french
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..643b16fd25e67c90f376b646bccd074e062a57f6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml
@@ -0,0 +1,4 @@
+include: crows_pairs_english.yaml
+task: crows_pairs_french_disability
+dataset_name: french
+process_docs: !function utils.filter_disability
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french_socioeconomic.yaml b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french_socioeconomic.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b2a782cc3abd9f46794c2e15ec8c5a838cf10d46
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/crows_pairs/crows_pairs_french_socioeconomic.yaml
@@ -0,0 +1,4 @@
+include: crows_pairs_english.yaml
+task: crows_pairs_french_socioeconomic
+dataset_name: french
+process_docs: !function utils.filter_socio
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/_generate_config.py b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/_generate_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1341fec89b52f3b0e9e7e778825b0d774117174
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/_generate_config.py
@@ -0,0 +1,109 @@
+import argparse
+
+import yaml
+
+
+# Different languages that are part of PAWS-X.
+# These correspond to dataset names (Subsets) on HuggingFace.
+# A yaml file is generated by this script for each language.
+
+LANGUAGES = {
+    "de": {  # German
+        "QUESTION_WORD": "richtig",
+        "YES": "Ja",
+        "NO": "Nein",
+    },
+    "en": {  # English
+        "QUESTION_WORD": "right",
+        "YES": "Yes",
+        "NO": "No",
+    },
+    "es": {  # Spanish
+        "QUESTION_WORD": "verdad",
+        "YES": "Sí",
+        "NO": "No",
+    },
+    "fr": {  # French
+        "QUESTION_WORD": "n'est-ce pas",
+        "YES": "Oui",
+        "NO": "No",
+    },
+    "ja": {  # Japanese
+        "QUESTION_WORD": "ですね",
+        "YES": "はい",
+        "NO": "いいえ",
+    },
+    "ko": {  # Korean
+        "QUESTION_WORD": "맞죠",
+        "YES": "예",
+        "NO": "아니요",
+    },
+    "zh": {  # Chinese
+        "QUESTION_WORD": "对吧",
+        "YES": "是",
+        "NO": "不是",
+    },
+}
+
+
+def gen_lang_yamls(output_dir: str, overwrite: bool) -> None:
+    """
+    Generate a yaml file for each language.
+
+    :param output_dir: The directory to output the files to.
+    :param overwrite: Whether to overwrite files if they already exist.
+    """
+    err = []
+    for lang in LANGUAGES.keys():
+        file_name = f"paws_{lang}.yaml"
+        try:
+            QUESTION_WORD = LANGUAGES[lang]["QUESTION_WORD"]
+            YES = LANGUAGES[lang]["YES"]
+            NO = LANGUAGES[lang]["NO"]
+            with open(
+                f"{output_dir}/{file_name}", "w" if overwrite else "x", encoding="utf8"
+            ) as f:
+                f.write("# Generated by utils.py\n")
+                yaml.dump(
+                    {
+                        "include": "pawsx_template_yaml",
+                        "dataset_name": lang,
+                        "task": f"paws_{lang}",
+                        "doc_to_text": "",
+                        "doc_to_choice": f"{{{{["
+                        f"""sentence1+\", {QUESTION_WORD}? {YES}, \"+sentence2,"""
+                        f""" sentence1+\", {QUESTION_WORD}? {NO}, \"+sentence2"""
+                        f"]}}}}",
+                    },
+                    f,
+                    allow_unicode=True,
+                )
+        except FileExistsError:
+            err.append(file_name)
+
+    if len(err) > 0:
+        raise FileExistsError(
+            "Files were not created because they already exist (use --overwrite flag):"
+            f" {', '.join(err)}"
+        )
+
+
+def main() -> None:
+    """Parse CLI args and generate language-specific yaml files."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--overwrite",
+        default=False,
+        action="store_true",
+        help="Overwrite files if they already exist",
+    )
+    parser.add_argument(
+        "--output-dir", default=".", help="Directory to write yaml files to"
+    )
+    args = parser.parse_args()
+
+    gen_lang_yamls(output_dir=args.output_dir, overwrite=args.overwrite)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_en.yaml b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_en.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c667e77a74f66a94efe9e10d6ef0b54bf53645d4
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_en.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: en
+doc_to_choice: '{{[sentence1+", right? Yes, "+sentence2, sentence1+", right? No, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_en
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_es.yaml b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_es.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e58805a9c6d7fcbcd5ada9a277d7fa2283655012
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_es.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: es
+doc_to_choice: '{{[sentence1+", verdad? Sí, "+sentence2, sentence1+", verdad? No,
+  "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_es
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_fr.yaml b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_fr.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f6973d998e53624af21ffedef577a040cc467d9d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_fr.yaml
@@ -0,0 +1,7 @@
+# Generated by utils.py
+dataset_name: fr
+doc_to_choice: '{{[sentence1+", n''est-ce pas? Oui, "+sentence2, sentence1+", n''est-ce
+  pas? No, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_fr
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_ja.yaml b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_ja.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..296885b3e2790bfaa72ecc697ed7e9f3269aec47
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_ja.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: ja
+doc_to_choice: '{{[sentence1+", ですね? はい, "+sentence2, sentence1+", ですね? いいえ, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_ja
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_ko.yaml b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_ko.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fc7034415496efcaffc50e988bd5f5f359c4fb2a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_ko.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: ko
+doc_to_choice: '{{[sentence1+", 맞죠? 예, "+sentence2, sentence1+", 맞죠? 아니요, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_ko
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_zh.yaml b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_zh.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6d8d2ac044e71e775eafe89d8df7bc2aa6675390
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/paws-x/paws_zh.yaml
@@ -0,0 +1,6 @@
+# Generated by utils.py
+dataset_name: zh
+doc_to_choice: '{{[sentence1+", 对吧? 是, "+sentence2, sentence1+", 对吧? 不是, "+sentence2]}}'
+doc_to_text: ''
+include: pawsx_template_yaml
+task: paws_zh
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/README.md b/lm-evaluation/build/lib/lm_eval/tasks/pile/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..633b6937a104be73c13ac1ae49240aa977211d4b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/README.md
@@ -0,0 +1,68 @@
+# The Pile
+
+### Paper
+Title: The Pile: An 800GB Dataset of Diverse Text for Language Modeling
+
+Abstract: https://arxiv.org/abs/2101.00027
+
+The Pile is an 825 GiB diverse, open source language modelling data set that consists
+of 22 smaller, high-quality datasets combined together. To score well on Pile
+BPB (bits per byte), a model must be able to understand many disparate domains
+including books, github repositories, webpages, chat logs, and medical, physics,
+math, computer science, and philosophy papers.
+
+Homepage: https://pile.eleuther.ai/
+
+### Citation
+```
+@article{pile,
+    title={The {P}ile: An 800GB Dataset of Diverse Text for Language Modeling},
+    author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and Presser, Shawn and Leahy, Connor},
+    journal={arXiv preprint arXiv:2101.00027},
+    year={2020}
+}
+```
+
+### Groups and Tasks
+
+#### Groups
+
+* `pile`
+
+#### Tasks
+
+* `pile_arxiv`
+* `pile_bookcorpus2`
+* `pile_books3`
+* `pile_dm-mathematics`
+* `pile_enron`
+* `pile_europarl`
+* `pile_freelaw`
+* `pile_github`
+* `pile_gutenberg`
+* `pile_hackernews`
+* `pile_nih-exporter`
+* `pile_opensubtitles`
+* `pile_openwebtext2`
+* `pile_philpapers`
+* `pile_pile-cc`
+* `pile_pubmed-abstracts`
+* `pile_pubmed-central`
+* `pile_stackexchange`
+* `pile_ubuntu-irc`
+* `pile_uspto`
+* `pile_wikipedia`
+* `pile_youtubesubtitles`
+
+### Checklist
+
+For adding novel benchmarks/datasets to the library:
+* [ ] Is the task an existing benchmark in the literature?
+  * [ ] Have you referenced the original paper that introduced the task?
+  * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?
+
+
+If other tasks on this dataset are already supported:
+* [ ] Is the "Main" variant of this task clearly denoted?
+* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
+* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_books3.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_books3.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ab767839508fb59f4b8b24588cd7e566c14c9cff
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_books3.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_books3
+dataset_name: pile_books3
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_dm-mathematics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_dm-mathematics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..33e0839db573b3a83386a05f1d2cb35066f11e99
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_dm-mathematics.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_dm-mathematics
+dataset_name: pile_dm-mathematics
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_enron.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_enron.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e344fcfa215c5896b1d23aef1c4d45f5f0f91448
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_enron.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_enron
+dataset_name: pile_enron
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_europarl.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_europarl.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aad5464be3f1153e8b98568dca003a859e89a34e
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_europarl.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_europarl
+dataset_name: pile_europarl
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_freelaw.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_freelaw.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b0d4efe90dc1b6292facded5d29b4476e598cf5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_freelaw.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_freelaw
+dataset_name: pile_freelaw
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_gutenberg.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_gutenberg.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dc5d39736a1229a9a15f03ff1c94cc95abcdfe66
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_gutenberg.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_gutenberg
+dataset_name: pile_gutenberg
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_hackernews.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_hackernews.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..71796902fc83943a1cdeea333488fe7974a866eb
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_hackernews.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_hackernews
+dataset_name: pile_hackernews
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_opensubtitles.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_opensubtitles.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a783cddd0d3d615fc89ed638d85a612fcb69e1a5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_opensubtitles.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_opensubtitles
+dataset_name: pile_opensubtitles
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_pile-cc.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_pile-cc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5c934441d97e3a57ab2a15e43f1350df4a313b42
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_pile-cc.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_pile-cc
+dataset_name: pile_pile-cc
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_pubmed-abstracts.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_pubmed-abstracts.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a366299cb286a86d5a4de1dd5b3b6deeeaf5bfe6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_pubmed-abstracts.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_pubmed-abstracts
+dataset_name: pile_pubmed-abstracts
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_stackexchange.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_stackexchange.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e68ab9d1b261e2502fa4d944ccaac95dec3ba5bc
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_stackexchange.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_stackexchange
+dataset_name: pile_stackexchange
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_ubuntu-irc.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_ubuntu-irc.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6d75fead9a0f718b2fb602c219a1dea42ffdba3c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_ubuntu-irc.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_ubuntu-irc
+dataset_name: pile_ubuntu-irc
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_uspto.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_uspto.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..95bb02511deb5e19829db985de40cf5adfe232f1
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_uspto.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_uspto
+dataset_name: pile_uspto
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_wikipedia.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_wikipedia.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..11236e9e8e94d346a7402420ce9dd5e2978333fc
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_wikipedia.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_wikipedia
+dataset_name: pile_wikipedia
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_youtubesubtitles.yaml b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_youtubesubtitles.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aaf7376c85dada7ead9b2e9c85648b496cfcf66c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/pile/pile_youtubesubtitles.yaml
@@ -0,0 +1,3 @@
+include: pile_arxiv.yaml
+task: pile_youtubesubtitles
+dataset_name: pile_youtubesubtitles
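
A minimal sketch (not part of the patch above) of how the `doc_to_choice` strings in the generated `paws_*.yaml` files are assembled: the snippet below rebuilds the template with the same f-string concatenation that `gen_lang_yamls` uses in `_generate_config.py`, restricted to the English entry for brevity; running it should print the string recorded in `paws_en.yaml`.

```python
# Sketch: rebuild the paws-x doc_to_choice template for one language,
# mirroring the string concatenation used in _generate_config.py.
LANGUAGES = {
    "en": {"QUESTION_WORD": "right", "YES": "Yes", "NO": "No"},
}


def doc_to_choice(lang: str) -> str:
    q = LANGUAGES[lang]["QUESTION_WORD"]
    yes = LANGUAGES[lang]["YES"]
    no = LANGUAGES[lang]["NO"]
    # A Jinja-style two-element list: sentence1 continued with the "yes"
    # answer and with the "no" answer, each followed by sentence2.
    return (
        "{{["
        f'sentence1+", {q}? {yes}, "+sentence2,'
        f' sentence1+", {q}? {no}, "+sentence2'
        "]}}"
    )


print(doc_to_choice("en"))
# -> {{[sentence1+", right? Yes, "+sentence2, sentence1+", right? No, "+sentence2]}}
```

For the longer Spanish and French templates, `yaml.dump` folds the string across two lines, which is why `paws_es.yaml` and `paws_fr.yaml` show `doc_to_choice` wrapped while the other languages fit on a single line.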