Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff
- lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_macroeconomics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_security_studies.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/README.md +49 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_tasks.py +206 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until_template_yaml +18 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice_template_yaml +15 -0
- lm-evaluation/build/lib/lm_eval/tasks/bigbench/push_bigbench_dataset.py +31 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/README.md +127 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/_default_ceval_yaml +19 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/_generate_configs.py +118 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_accountant.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_education_science.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_law.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_logic.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_marxism.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_high_school_macroeconomics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_macroeconomics"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_high_school_macroeconomics"
lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "medical_genetics"
"description": "فم بعملية التقييم في مجال علوم أخرى \n\n"
"include": "_default_template_yaml"
"task": "ammlu_medical_genetics"
lm-evaluation/build/lib/lm_eval/tasks/ammlu/ammlu_security_studies.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "security_studies"
"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n"
"include": "_default_template_yaml"
"task": "ammlu_security_studies"
lm-evaluation/build/lib/lm_eval/tasks/bigbench/README.md
ADDED
@@ -0,0 +1,49 @@
# BigBench

### Paper

Title: `Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models`

Abstract: https://arxiv.org/abs/2206.04615

The Beyond the Imitation Game Benchmark (BIG-bench) is a collaborative benchmark intended to probe large language models and extrapolate their future capabilities.

Homepage: https://github.com/google/BIG-bench


### Citation

```
@misc{srivastava2022imitation,
title={Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models},
author={Aarohi Srivastava and Abhinav Rastogi and Abhishek Rao and Abu Awal Md Shoeb and Abubakar Abid and Adam Fisch and Adam R. Brown and Adam Santoro and Aditya Gupta and Adrià Garriga-Alonso and Agnieszka Kluska and Aitor Lewkowycz and Akshat Agarwal and Alethea Power and Alex Ray and Alex Warstadt and Alexander W. Kocurek and Ali Safaya and Ali Tazarv and Alice Xiang and Alicia Parrish and Allen Nie and Aman Hussain and Amanda Askell and Amanda Dsouza and Ambrose Slone and Ameet Rahane and Anantharaman S. Iyer and Anders Andreassen and Andrea Madotto and Andrea Santilli and Andreas Stuhlmüller and Andrew Dai and Andrew La and Andrew Lampinen and Andy Zou and Angela Jiang and Angelica Chen and Anh Vuong and Animesh Gupta and Anna Gottardi and Antonio Norelli and Anu Venkatesh and Arash Gholamidavoodi and Arfa Tabassum and Arul Menezes and Arun Kirubarajan and Asher Mullokandov and Ashish Sabharwal and Austin Herrick and Avia Efrat and Aykut Erdem and Ayla Karakaş and B. Ryan Roberts and Bao Sheng Loe and Barret Zoph and Bartłomiej Bojanowski and Batuhan Özyurt and Behnam Hedayatnia and Behnam Neyshabur and Benjamin Inden and Benno Stein and Berk Ekmekci and Bill Yuchen Lin and Blake Howald and Cameron Diao and Cameron Dour and Catherine Stinson and Cedrick Argueta and César Ferri Ramírez and Chandan Singh and Charles Rathkopf and Chenlin Meng and Chitta Baral and Chiyu Wu and Chris Callison-Burch and Chris Waites and Christian Voigt and Christopher D. Manning and Christopher Potts and Cindy Ramirez and Clara E. Rivera and Clemencia Siro and Colin Raffel and Courtney Ashcraft and Cristina Garbacea and Damien Sileo and Dan Garrette and Dan Hendrycks and Dan Kilman and Dan Roth and Daniel Freeman and Daniel Khashabi and Daniel Levy and Daniel Moseguí González and Danielle Perszyk and Danny Hernandez and Danqi Chen and Daphne Ippolito and Dar Gilboa and David Dohan and David Drakard and David Jurgens and Debajyoti Datta and Deep Ganguli and Denis Emelin and Denis Kleyko and Deniz Yuret and Derek Chen and Derek Tam and Dieuwke Hupkes and Diganta Misra and Dilyar Buzan and Dimitri Coelho Mollo and Diyi Yang and Dong-Ho Lee and Ekaterina Shutova and Ekin Dogus Cubuk and Elad Segal and Eleanor Hagerman and Elizabeth Barnes and Elizabeth Donoway and Ellie Pavlick and Emanuele Rodola and Emma Lam and Eric Chu and Eric Tang and Erkut Erdem and Ernie Chang and Ethan A. Chi and Ethan Dyer and Ethan Jerzak and Ethan Kim and Eunice Engefu Manyasi and Evgenii Zheltonozhskii and Fanyue Xia and Fatemeh Siar and Fernando Martínez-Plumed and Francesca Happé and Francois Chollet and Frieda Rong and Gaurav Mishra and Genta Indra Winata and Gerard de Melo and Germán Kruszewski and Giambattista Parascandolo and Giorgio Mariani and Gloria Wang and Gonzalo Jaimovitch-López and Gregor Betz and Guy Gur-Ari and Hana Galijasevic and Hannah Kim and Hannah Rashkin and Hannaneh Hajishirzi and Harsh Mehta and Hayden Bogar and Henry Shevlin and Hinrich Schütze and Hiromu Yakura and Hongming Zhang and Hugh Mee Wong and Ian Ng and Isaac Noble and Jaap Jumelet and Jack Geissinger and Jackson Kernion and Jacob Hilton and Jaehoon Lee and Jaime Fernández Fisac and James B. 
Simon and James Koppel and James Zheng and James Zou and Jan Kocoń and Jana Thompson and Jared Kaplan and Jarema Radom and Jascha Sohl-Dickstein and Jason Phang and Jason Wei and Jason Yosinski and Jekaterina Novikova and Jelle Bosscher and Jennifer Marsh and Jeremy Kim and Jeroen Taal and Jesse Engel and Jesujoba Alabi and Jiacheng Xu and Jiaming Song and Jillian Tang and Joan Waweru and John Burden and John Miller and John U. Balis and Jonathan Berant and Jörg Frohberg and Jos Rozen and Jose Hernandez-Orallo and Joseph Boudeman and Joseph Jones and Joshua B. Tenenbaum and Joshua S. Rule and Joyce Chua and Kamil Kanclerz and Karen Livescu and Karl Krauth and Karthik Gopalakrishnan and Katerina Ignatyeva and Katja Markert and Kaustubh D. Dhole and Kevin Gimpel and Kevin Omondi and Kory Mathewson and Kristen Chiafullo and Ksenia Shkaruta and Kumar Shridhar and Kyle McDonell and Kyle Richardson and Laria Reynolds and Leo Gao and Li Zhang and Liam Dugan and Lianhui Qin and Lidia Contreras-Ochando and Louis-Philippe Morency and Luca Moschella and Lucas Lam and Lucy Noble and Ludwig Schmidt and Luheng He and Luis Oliveros Colón and Luke Metz and Lütfi Kerem Şenel and Maarten Bosma and Maarten Sap and Maartje ter Hoeve and Maheen Farooqi and Manaal Faruqui and Mantas Mazeika and Marco Baturan and Marco Marelli and Marco Maru and Maria Jose Ramírez Quintana and Marie Tolkiehn and Mario Giulianelli and Martha Lewis and Martin Potthast and Matthew L. Leavitt and Matthias Hagen and Mátyás Schubert and Medina Orduna Baitemirova and Melody Arnaud and Melvin McElrath and Michael A. Yee and Michael Cohen and Michael Gu and Michael Ivanitskiy and Michael Starritt and Michael Strube and Michał Swędrowski and Michele Bevilacqua and Michihiro Yasunaga and Mihir Kale and Mike Cain and Mimee Xu and Mirac Suzgun and Mo Tiwari and Mohit Bansal and Moin Aminnaseri and Mor Geva and Mozhdeh Gheini and Mukund Varma T and Nanyun Peng and Nathan Chi and Nayeon Lee and Neta Gur-Ari Krakover and Nicholas Cameron and Nicholas Roberts and Nick Doiron and Nikita Nangia and Niklas Deckers and Niklas Muennighoff and Nitish Shirish Keskar and Niveditha S. Iyer and Noah Constant and Noah Fiedel and Nuan Wen and Oliver Zhang and Omar Agha and Omar Elbaghdadi and Omer Levy and Owain Evans and Pablo Antonio Moreno Casares and Parth Doshi and Pascale Fung and Paul Pu Liang and Paul Vicol and Pegah Alipoormolabashi and Peiyuan Liao and Percy Liang and Peter Chang and Peter Eckersley and Phu Mon Htut and Pinyu Hwang and Piotr Miłkowski and Piyush Patil and Pouya Pezeshkpour and Priti Oli and Qiaozhu Mei and Qing Lyu and Qinlang Chen and Rabin Banjade and Rachel Etta Rudolph and Raefer Gabriel and Rahel Habacker and Ramón Risco Delgado and Raphaël Millière and Rhythm Garg and Richard Barnes and Rif A. Saurous and Riku Arakawa and Robbe Raymaekers and Robert Frank and Rohan Sikand and Roman Novak and Roman Sitelew and Ronan LeBras and Rosanne Liu and Rowan Jacobs and Rui Zhang and Ruslan Salakhutdinov and Ryan Chi and Ryan Lee and Ryan Stovall and Ryan Teehan and Rylan Yang and Sahib Singh and Saif M. Mohammad and Sajant Anand and Sam Dillavou and Sam Shleifer and Sam Wiseman and Samuel Gruetter and Samuel R. Bowman and Samuel S. Schoenholz and Sanghyun Han and Sanjeev Kwatra and Sarah A. 
Rous and Sarik Ghazarian and Sayan Ghosh and Sean Casey and Sebastian Bischoff and Sebastian Gehrmann and Sebastian Schuster and Sepideh Sadeghi and Shadi Hamdan and Sharon Zhou and Shashank Srivastava and Sherry Shi and Shikhar Singh and Shima Asaadi and Shixiang Shane Gu and Shubh Pachchigar and Shubham Toshniwal and Shyam Upadhyay and Shyamolima and Debnath and Siamak Shakeri and Simon Thormeyer and Simone Melzi and Siva Reddy and Sneha Priscilla Makini and Soo-Hwan Lee and Spencer Torene and Sriharsha Hatwar and Stanislas Dehaene and Stefan Divic and Stefano Ermon and Stella Biderman and Stephanie Lin and Stephen Prasad and Steven T. Piantadosi and Stuart M. Shieber and Summer Misherghi and Svetlana Kiritchenko and Swaroop Mishra and Tal Linzen and Tal Schuster and Tao Li and Tao Yu and Tariq Ali and Tatsu Hashimoto and Te-Lin Wu and Théo Desbordes and Theodore Rothschild and Thomas Phan and Tianle Wang and Tiberius Nkinyili and Timo Schick and Timofei Kornev and Timothy Telleen-Lawton and Titus Tunduny and Tobias Gerstenberg and Trenton Chang and Trishala Neeraj and Tushar Khot and Tyler Shultz and Uri Shaham and Vedant Misra and Vera Demberg and Victoria Nyamai and Vikas Raunak and Vinay Ramasesh and Vinay Uday Prabhu and Vishakh Padmakumar and Vivek Srikumar and William Fedus and William Saunders and William Zhang and Wout Vossen and Xiang Ren and Xiaoyu Tong and Xinran Zhao and Xinyi Wu and Xudong Shen and Yadollah Yaghoobzadeh and Yair Lakretz and Yangqiu Song and Yasaman Bahri and Yejin Choi and Yichi Yang and Yiding Hao and Yifu Chen and Yonatan Belinkov and Yu Hou and Yufang Hou and Yuntao Bai and Zachary Seid and Zhuoye Zhao and Zijian Wang and Zijie J. Wang and Zirui Wang and Ziyi Wu},
year={2022},
eprint={2206.04615},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

* `group_name`: `Short description`

#### Tasks

* `task_name`: `1-sentence description of what this particular task does`
* `task_name2`: ...

### Checklist

For adding novel benchmarks/datasets to the library:
* [ ] Is the task an existing benchmark in the literature?
* [ ] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test?


If other tasks on this dataset are already supported:
* [ ] Is the "Main" variant of this task clearly denoted?
* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_tasks.py
ADDED
@@ -0,0 +1,206 @@
import os

import yaml


all_subtasks = [
    "abstract_narrative_understanding",
    "anachronisms",
    "analogical_similarity",
    "analytic_entailment",
    "arithmetic",
    "ascii_word_recognition",
    "authorship_verification",
    "auto_categorization",
    "auto_debugging",
    "bbq_lite_json",
    "bridging_anaphora_resolution_barqa",
    "causal_judgment",
    "cause_and_effect",
    "checkmate_in_one",
    "chess_state_tracking",
    "chinese_remainder_theorem",
    "cifar10_classification",
    "code_line_description",
    "codenames",
    "color",
    "common_morpheme",
    "conceptual_combinations",
    "conlang_translation",
    "contextual_parametric_knowledge_conflicts",
    "crash_blossom",
    "crass_ai",
    "cryobiology_spanish",
    "cryptonite",
    "cs_algorithms",
    "dark_humor_detection",
    "date_understanding",
    "disambiguation_qa",
    "discourse_marker_prediction",
    "disfl_qa",
    "dyck_languages",
    "elementary_math_qa",
    "emoji_movie",
    "emojis_emotion_prediction",
    "empirical_judgments",
    "english_proverbs",
    "english_russian_proverbs",
    "entailed_polarity",
    "entailed_polarity_hindi",
    "epistemic_reasoning",
    "evaluating_information_essentiality",
    "fact_checker",
    "fantasy_reasoning",
    "few_shot_nlg",
    "figure_of_speech_detection",
    "formal_fallacies_syllogisms_negation",
    "gem",
    "gender_inclusive_sentences_german",
    "general_knowledge",
    "geometric_shapes",
    "goal_step_wikihow",
    "gre_reading_comprehension",
    "hhh_alignment",
    "hindi_question_answering",
    "hindu_knowledge",
    "hinglish_toxicity",
    "human_organs_senses",
    "hyperbaton",
    "identify_math_theorems",
    "identify_odd_metaphor",
    "implicatures",
    "implicit_relations",
    "intent_recognition",
    "international_phonetic_alphabet_nli",
    "international_phonetic_alphabet_transliterate",
    "intersect_geometry",
    "irony_identification",
    "kanji_ascii",
    "kannada",
    "key_value_maps",
    "known_unknowns",
    "language_games",
    "language_identification",
    "linguistic_mappings",
    "linguistics_puzzles",
    "list_functions",
    "logic_grid_puzzle",
    "logical_args",
    "logical_deduction",
    "logical_fallacy_detection",
    "logical_sequence",
    "mathematical_induction",
    "matrixshapes",
    "metaphor_boolean",
    "metaphor_understanding",
    "minute_mysteries_qa",
    "misconceptions",
    "misconceptions_russian",
    "mnist_ascii",
    "modified_arithmetic",
    "moral_permissibility",
    "movie_dialog_same_or_different",
    "movie_recommendation",
    "mult_data_wrangling",
    "multiemo",
    "natural_instructions",
    "navigate",
    "nonsense_words_grammar",
    "novel_concepts",
    "object_counting",
    "odd_one_out",
    "operators",
    "paragraph_segmentation",
    "parsinlu_qa",
    "parsinlu_reading_comprehension",
    "penguins_in_a_table",
    "periodic_elements",
    "persian_idioms",
    "phrase_relatedness",
    "physical_intuition",
    "physics",
    "physics_questions",
    "play_dialog_same_or_different",
    "polish_sequence_labeling",
    "presuppositions_as_nli",
    "qa_wikidata",
    "question_selection",
    "real_or_fake_text",
    "reasoning_about_colored_objects",
    "repeat_copy_logic",
    "rephrase",
    "riddle_sense",
    "ruin_names",
    "salient_translation_error_detection",
    "scientific_press_release",
    "semantic_parsing_in_context_sparc",
    "semantic_parsing_spider",
    "sentence_ambiguity",
    "similarities_abstraction",
    "simp_turing_concept",
    "simple_arithmetic_json",
    "simple_arithmetic_json_multiple_choice",
    "simple_arithmetic_json_subtasks",
    "simple_arithmetic_multiple_targets_json",
    "simple_ethical_questions",
    "simple_text_editing",
    "snarks",
    "social_iqa",
    "social_support",
    "sports_understanding",
    "strange_stories",
    "strategyqa",
    "sufficient_information",
    "suicide_risk",
    "swahili_english_proverbs",
    "swedish_to_german_proverbs",
    "symbol_interpretation",
    "temporal_sequences",
    "tense",
    "timedial",
    "topical_chat",
    "tracking_shuffled_objects",
    "understanding_fables",
    "undo_permutation",
    "unit_conversion",
    "unit_interpretation",
    "unnatural_in_context_learning",
    "vitaminc_fact_verification",
    "what_is_the_tao",
    "which_wiki_edit",
    "winowhy",
    "word_sorting",
    "word_unscrambling",
]


def main() -> None:
    for path, task_type in zip(
        ["multiple_choice", "generate_until"],
        ["multiple_choice_template_yaml", "generate_until_template_yaml"],
    ):
        os.makedirs(path, exist_ok=True)
        for task in all_subtasks:
            file_name = f"{task}.yaml"
            try:
                with open(f"{path}/{file_name}", "w", encoding="utf-8") as f:
                    f.write("# Generated by utils.py\n")
                    yaml.dump(
                        {
                            "include": f"../{task_type}",
                            "task": "bigbench_"
                            + task
                            + "_{}".format(task_type.split("_template_yaml")[0]),
                            "dataset_name": task
                            + "_zero_shot",  # zero-shot version of the dataset
                        },
                        f,
                        width=float("inf"),
                        allow_unicode=True,
                    )
            except FileExistsError:
                pass


if __name__ == "__main__":
    main()
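For reference, a minimal sketch (not part of the diff) of what `generate_tasks.py` above emits for a single subtask; the subtask name `causal_judgment` is just an example, and the dict mirrors the one built in `main()`:

```python
# Hypothetical walk-through of generate_tasks.py's output for one subtask.
# The task/dataset naming mirrors the string building in main() above.
import yaml

for task_type in ["multiple_choice_template_yaml", "generate_until_template_yaml"]:
    entry = {
        "include": f"../{task_type}",
        "task": "bigbench_causal_judgment_" + task_type.split("_template_yaml")[0],
        "dataset_name": "causal_judgment_zero_shot",
    }
    print(yaml.dump(entry, width=float("inf"), allow_unicode=True))
# Expected task names: bigbench_causal_judgment_multiple_choice and
# bigbench_causal_judgment_generate_until.
```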
lm-evaluation/build/lib/lm_eval/tasks/bigbench/generate_until_template_yaml
ADDED
@@ -0,0 +1,18 @@
group: bigbench_generate_until
dataset_path: hails/bigbench
output_type: generate_until
dataset_kwargs:
  # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods
  # subtask_name: null
test_split: default
doc_to_text: inputs
doc_to_target: "{{targets[0]}}"
generation_kwargs:
  max_gen_toks: 128
metric_list:
  - metric: exact_match
    aggregation: mean
    higher_is_better: true
    ignore_punctuation: true
metadata:
  version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/bigbench/multiple_choice_template_yaml
ADDED
@@ -0,0 +1,15 @@
group: bigbench_multiple_choice
dataset_path: hails/bigbench
dataset_kwargs:
  # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods
  # subtask_name: null
output_type: multiple_choice
test_split: default
doc_to_text: inputs
doc_to_target: "{{multiple_choice_targets.index(targets[0])}}"
doc_to_choice: "{{multiple_choice_targets}}"
metric_list:
  - metric: acc
  # TODO: brier score and other metrics
metadata:
  version: 0.0
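A small illustration of how the two templates above consume a record: the prompt is the `inputs` field, and for the multiple-choice template the gold label is the index of `targets[0]` within `multiple_choice_targets`. The record below is hypothetical and only follows those field names:

```python
# Hypothetical record using the field names referenced by the templates above
# (inputs, targets, multiple_choice_targets); not taken from the dataset.
doc = {
    "inputs": "Is the following sentence plausible? ...",
    "targets": ["no"],
    "multiple_choice_targets": ["yes", "no"],
}

# doc_to_text: the prompt is simply the `inputs` field.
prompt = doc["inputs"]

# doc_to_target: index of the gold answer string within the choice list.
gold_index = doc["multiple_choice_targets"].index(doc["targets"][0])
print(prompt, gold_index)  # gold_index == 1
```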
lm-evaluation/build/lib/lm_eval/tasks/bigbench/push_bigbench_dataset.py
ADDED
@@ -0,0 +1,31 @@
"""
A utility script that pushes all Bigbench subtasks from their form in the `bigbench` HF dataset
into `{org name}/bigbench`.

Prior to running, log into HF Hub for the target HF hub org via `huggingface-cli login`.

Requires the installation of
`pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"`
and is included so that the bigbench dependency can be avoided.
"""
import bigbench.api.util as bb_utils
import datasets
from tqdm import tqdm


all_task_names = bb_utils.get_all_json_task_names()

num_shots = [0]

for shots in num_shots:
    for task_name in tqdm(all_task_names):
        try:
            print(f"Loading '{task_name}' with num_shots={shots}...")
            task_ds = datasets.load_dataset("bigbench", name=task_name, num_shots=shots)

            print(f"Pushing '{task_name}' with num_shots={shots}...")
            task_ds.push_to_hub("hails/bigbench", task_name + "_zero_shot")

            del task_ds
        except Exception as e:
            raise e
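As a hedged spot-check, a subtask pushed by this script can presumably be loaded back under the same `<task>_zero_shot` config name it was pushed with; the `hails/bigbench` repo and the `default` split are the ones referenced by the templates above, and `causal_judgment` is just an example subtask:

```python
# Hypothetical spot-check, assuming the hails/bigbench repo and the
# "<task>_zero_shot" config naming produced by push_bigbench_dataset.py.
import datasets

ds = datasets.load_dataset("hails/bigbench", "causal_judgment_zero_shot")
print(ds)                # available splits (the templates read the "default" split)
print(ds["default"][0])  # one record: inputs / targets / multiple_choice_targets
```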
lm-evaluation/build/lib/lm_eval/tasks/ceval/README.md
ADDED
@@ -0,0 +1,127 @@
# C-Eval (Validation)

### Paper
C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models
https://arxiv.org/pdf/2305.08322.pdf

C-Eval is a comprehensive Chinese evaluation suite for foundation models.
It consists of 13948 multi-choice questions spanning 52 diverse disciplines
and four difficulty levels.

Homepage: https://cevalbenchmark.com/

### Citation

```bibtex
@article{huang2023ceval,
title={C-Eval: A Multi-Level Multi-Discipline Chinese Evaluation Suite for Foundation Models},
author={Huang, Yuzhen and Bai, Yuzhuo and Zhu, Zhihao and Zhang, Junlei and Zhang, Jinghan and Su, Tangjun and Liu, Junteng and Lv, Chuancheng and Zhang, Yikai and Lei, Jiayi and Fu, Yao and Sun, Maosong and He, Junxian},
journal={arXiv preprint arXiv:2305.08322},
year={2023}
}
```


SUBJECTS = {
"computer_network":"计算机网络",
"operating_system":"操作系统",
"computer_architecture":"计算机组成",
"college_programming":"大学编程",
"college_physics":"大学物理",
"college_chemistry":"大学化学",
"advanced_mathematics":"高等数学",
"probability_and_statistics":"概率统计",
"discrete_mathematics":"离散数学",
"electrical_engineer":"注册电气工程师",
"metrology_engineer":"注册计量师",
"high_school_mathematics":"高中数学",
"high_school_physics":"高中物理",
"high_school_chemistry":"高中化学",
"high_school_biology":"高中生物",
"middle_school_mathematics":"初中数学",
"middle_school_biology":"初中生物",
"middle_school_physics":"初中物理",
"middle_school_chemistry":"初中化学",
"veterinary_medicine":"兽医学",
"college_economics":"大学经济学",
"business_administration":"工商管理",
"marxism":"马克思主义基本原理",
"mao_zedong_thought":"毛泽东思想和中国特色社会主义理论体系概论",
"education_science":"教育学",
"teacher_qualification":"教师资格",
"high_school_politics":"高中政治",
"high_school_geography":"高中地理",
"middle_school_politics":"初中政治",
"middle_school_geography":"初中地理",
"modern_chinese_history":"近代史纲要",
"ideological_and_moral_cultivation":"思想道德修养与法律基础",
"logic":"逻辑学",
"law":"法学",
"chinese_language_and_literature":"中国语言文学",
"art_studies":"艺术学",
"professional_tour_guide":"导游资格",
"legal_professional":"法律职业资格",
"high_school_chinese":"高中语文",
"high_school_history":"高中历史",
"middle_school_history":"初中历史",
"civil_servant":"公务员",
"sports_science":"体育学",
"plant_protection":"植物保护",
"basic_medicine":"基础医学",
"clinical_medicine":"临床医学",
"urban_and_rural_planner":"注册城乡规划师",
"accountant":"注册会计师",
"fire_engineer":"注册消防工程师",
"environmental_impact_assessment_engineer":"环境影响评价工程师",
"tax_accountant":"税务师",
"physician":"医师资格"
}


# CMMLU

### Paper

CMMLU: Measuring massive multitask language understanding in Chinese
https://arxiv.org/abs/2306.09212

CMMLU is a comprehensive evaluation benchmark specifically designed to evaluate the knowledge and reasoning abilities of LLMs within the context of Chinese language and culture.
CMMLU covers a wide range of subjects, comprising 67 topics that span from elementary to advanced professional levels.

Homepage: https://github.com/haonan-li/CMMLU

### Citation

```bibtex
@misc{li2023cmmlu,
title={CMMLU: Measuring massive multitask language understanding in Chinese},
author={Haonan Li and Yixuan Zhang and Fajri Koto and Yifei Yang and Hai Zhao and Yeyun Gong and Nan Duan and Timothy Baldwin},
year={2023},
eprint={2306.09212},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
```

### Groups and Tasks

#### Groups

- `ceval-valid`: All 52 subjects of the C-Eval dataset, evaluated following the methodology in MMLU's original implementation. This implementation consists solely of the validation set of C-Eval, as the test set requires submission of model predictions to an external site.

#### Tasks


The following tasks evaluate subjects in the C-Eval dataset using loglikelihood-based multiple-choice scoring:
- `ceval-valid_{subject_english}`

### Checklist

* [x] Is the task an existing benchmark in the literature?
* [x] Have you referenced the original paper that introduced the task?
* [ ] If yes, does the original paper provide a reference implementation?

If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [ ] Have you noted which, if any, published evaluation setups are matched by this variant?
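A hedged sketch of running one of these tasks through the harness's Python entry point; the exact `simple_evaluate` signature may differ between harness versions, and the model named below is only a placeholder:

```python
# Sketch only: assumes lm_eval exposes simple_evaluate as in recent harness
# versions; "pretrained=..." is a placeholder model, not a recommendation.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=Qwen/Qwen1.5-0.5B",
    tasks=["ceval-valid_logic"],  # or the full "ceval-valid" group
    num_fewshot=5,
)
print(results["results"])
```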
lm-evaluation/build/lib/lm_eval/tasks/ceval/_default_ceval_yaml
ADDED
@@ -0,0 +1,19 @@
group: ceval-valid
dataset_path: ceval/ceval-exam
validation_split: val
fewshot_split: dev
fewshot_config:
  sampler: first_n
output_type: multiple_choice
doc_to_text: "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
doc_to_choice: ["A", "B", "C", "D"]
doc_to_target: "{{['A', 'B', 'C', 'D'].index(answer)}}"
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true
  - metric: acc_norm
    aggregation: mean
    higher_is_better: true
metadata:
  version: 1.0
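A minimal sketch of how the Jinja-style `doc_to_text` and `doc_to_target` fields above turn one record into a prompt and a gold choice index; the record below is made up, but the field names (`question`, `A`-`D`, `answer`) match the template:

```python
# Hypothetical C-Eval-style record; values are invented for illustration only.
from jinja2 import Template

doc = {"question": "设 f(x)=x^2,则 f'(1) 等于?", "A": "0", "B": "1", "C": "2", "D": "4", "answer": "C"}

# doc_to_text: render the question and four options into the prompt.
prompt = Template(
    "{{question.strip()}}\nA. {{A}}\nB. {{B}}\nC. {{C}}\nD. {{D}}\n答案:"
).render(**doc)
print(prompt)

# doc_to_target: index of the gold letter among the four choices.
print(["A", "B", "C", "D"].index(doc["answer"]))  # -> 2
```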
lm-evaluation/build/lib/lm_eval/tasks/ceval/_generate_configs.py
ADDED
@@ -0,0 +1,118 @@
"""
Take in a YAML, and output all other splits with this YAML
"""
import argparse
import os

import yaml
from tqdm import tqdm

from lm_eval.logger import eval_logger


SUBJECTS = {
    "computer_network": "计算机网络",
    "operating_system": "操作系统",
    "computer_architecture": "计算机组成",
    "college_programming": "大学编程",
    "college_physics": "大学物理",
    "college_chemistry": "大学化学",
    "advanced_mathematics": "高等数学",
    "probability_and_statistics": "概率统计",
    "discrete_mathematics": "离散数学",
    "electrical_engineer": "注册电气工程师",
    "metrology_engineer": "注册计量师",
    "high_school_mathematics": "高中数学",
    "high_school_physics": "高中物理",
    "high_school_chemistry": "高中化学",
    "high_school_biology": "高中生物",
    "middle_school_mathematics": "初中数学",
    "middle_school_biology": "初中生物",
    "middle_school_physics": "初中物理",
    "middle_school_chemistry": "初中化学",
    "veterinary_medicine": "兽医学",
    "college_economics": "大学经济学",
    "business_administration": "工商管理",
    "marxism": "马克思主义基本原理",
    "mao_zedong_thought": "毛泽东思想和中国特色社会主义理论体系概论",
    "education_science": "教育学",
    "teacher_qualification": "教师资格",
    "high_school_politics": "高中政治",
    "high_school_geography": "高中地理",
    "middle_school_politics": "初中政治",
    "middle_school_geography": "初中地理",
    "modern_chinese_history": "近代史纲要",
    "ideological_and_moral_cultivation": "思想道德修养与法律基础",
    "logic": "逻辑学",
    "law": "法学",
    "chinese_language_and_literature": "中国语言文学",
    "art_studies": "艺术学",
    "professional_tour_guide": "导游资格",
    "legal_professional": "法律职业资格",
    "high_school_chinese": "高中语文",
    "high_school_history": "高中历史",
    "middle_school_history": "初中历史",
    "civil_servant": "公务员",
    "sports_science": "体育学",
    "plant_protection": "植物保护",
    "basic_medicine": "基础医学",
    "clinical_medicine": "临床医学",
    "urban_and_rural_planner": "注册城乡规划师",
    "accountant": "注册会计师",
    "fire_engineer": "注册消防工程师",
    "environmental_impact_assessment_engineer": "环境影响评价工程师",
    "tax_accountant": "税务师",
    "physician": "医师资格",
}


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base_yaml_path", required=True)
    parser.add_argument("--save_prefix_path", default="ceval-valid")
    parser.add_argument("--cot_prompt_path", default=None)
    parser.add_argument("--task_prefix", default="")
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()

    # get filename of base_yaml so we can `"include": ` it in our other YAMLs.
    base_yaml_name = os.path.split(args.base_yaml_path)[-1]
    with open(args.base_yaml_path, encoding="utf-8") as f:
        base_yaml = yaml.full_load(f)

    if args.cot_prompt_path is not None:
        import json

        with open(args.cot_prompt_path, encoding="utf-8") as f:
            cot_file = json.load(f)

    for subject_eng, subject_zh in tqdm(SUBJECTS.items()):
        if args.cot_prompt_path is not None:
            description = cot_file[subject_eng]
        else:
            description = (
                f"以下是中国关于{subject_zh}的单项选择题,请选出其中的正确答案。\n\n"
            )

        yaml_dict = {
            "include": base_yaml_name,
            "task": f"ceval-valid_{args.task_prefix}_{subject_eng}"
            if args.task_prefix != ""
            else f"ceval-valid_{subject_eng}",
            "dataset_name": subject_eng,
            "description": description,
        }

        file_save_path = args.save_prefix_path + f"_{subject_eng}.yaml"
        eval_logger.info(f"Saving yaml for subset {subject_eng} to {file_save_path}")
        with open(file_save_path, "w", encoding="utf-8") as yaml_file:
            yaml.dump(
                yaml_dict,
                yaml_file,
                width=float("inf"),
                allow_unicode=True,
                default_style='"',
            )
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_accountant.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "accountant"
"description": "以下是中国关于注册会计师的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_accountant"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_advanced_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "advanced_mathematics"
"description": "以下是中国关于高等数学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_advanced_mathematics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_art_studies.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "art_studies"
"description": "以下是中国关于艺术学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_art_studies"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_basic_medicine.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "basic_medicine"
"description": "以下是中国关于基础医学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_basic_medicine"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_business_administration.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "business_administration"
"description": "以下是中国关于工商管理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_business_administration"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_chinese_language_and_literature.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "chinese_language_and_literature"
"description": "以下是中国关于中国语言文学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_chinese_language_and_literature"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_civil_servant.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "civil_servant"
"description": "以下是中国关于公务员的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_civil_servant"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_clinical_medicine.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "clinical_medicine"
"description": "以下是中国关于临床医学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_clinical_medicine"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_chemistry.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_chemistry"
"description": "以下是中国关于大学化学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_college_chemistry"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_economics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_economics"
"description": "以下是中国关于大学经济学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_college_economics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_physics"
"description": "以下是中国关于大学物理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_college_physics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_college_programming.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_programming"
"description": "以下是中国关于大学编程的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_college_programming"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_computer_architecture.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "computer_architecture"
"description": "以下是中国关于计算机组成的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_computer_architecture"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_computer_network.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "computer_network"
"description": "以下是中国关于计算机网络的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_computer_network"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_discrete_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "discrete_mathematics"
"description": "以下是中国关于离散数学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_discrete_mathematics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_education_science.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "education_science"
"description": "以下是中国关于教育学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_education_science"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_electrical_engineer.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "electrical_engineer"
"description": "以下是中国关于注册电气工程师的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_electrical_engineer"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_environmental_impact_assessment_engineer.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "environmental_impact_assessment_engineer"
"description": "以下是中国关于环境影响评价工程师的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_environmental_impact_assessment_engineer"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_fire_engineer.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "fire_engineer"
"description": "以下是中国关于注册消防工程师的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_fire_engineer"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_biology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_biology"
"description": "以下是中国关于高中生物的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_biology"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_chemistry.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_chemistry"
"description": "以下是中国关于高中化学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_chemistry"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_chinese.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_chinese"
"description": "以下是中国关于高中语文的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_chinese"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_geography.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_geography"
"description": "以下是中国关于高中地理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_geography"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_history.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_history"
"description": "以下是中国关于高中历史的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_history"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_mathematics"
"description": "以下是中国关于高中数学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_mathematics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_physics"
"description": "以下是中国关于高中物理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_physics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_high_school_politics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_politics"
"description": "以下是中国关于高中政治的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_high_school_politics"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_ideological_and_moral_cultivation.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "ideological_and_moral_cultivation"
"description": "以下是中国关于思想道德修养与法律基础的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_ideological_and_moral_cultivation"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_law.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "law"
"description": "以下是中国关于法学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_law"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_legal_professional.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "legal_professional"
"description": "以下是中国关于法律职业资格的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_legal_professional"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_logic.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "logic"
"description": "以下是中国关于逻辑学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_logic"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_mao_zedong_thought.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "mao_zedong_thought"
"description": "以下是中国关于毛泽东思想和中国特色社会主义理论体系概论的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_mao_zedong_thought"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_marxism.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "marxism"
"description": "以下是中国关于马克思主义基本原理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_marxism"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_metrology_engineer.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "metrology_engineer"
"description": "以下是中国关于注册计量师的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_metrology_engineer"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_biology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "middle_school_biology"
"description": "以下是中国关于初中生物的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_middle_school_biology"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_chemistry.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "middle_school_chemistry"
"description": "以下是中国关于初中化学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_middle_school_chemistry"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_geography.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "middle_school_geography"
"description": "以下是中国关于初中地理的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_middle_school_geography"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_history.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "middle_school_history"
"description": "以下是中国关于初中历史的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_middle_school_history"
lm-evaluation/build/lib/lm_eval/tasks/ceval/ceval-valid_middle_school_mathematics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "middle_school_mathematics"
"description": "以下是中国关于初中数学的单项选择题,请选出其中的正确答案。\n\n"
"include": "_default_ceval_yaml"
"task": "ceval-valid_middle_school_mathematics"