diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6448b2eefe7decc008a19ff306b83c306bdf9bcc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_abstract_algebra.yaml @@ -0,0 +1,4 @@ +"dataset_name": "abstract_algebra" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_abstract_algebra" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_anatomy.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_anatomy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0ea332903d86f6f76e4e43718d6f8ef4b1f887ea --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_anatomy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "anatomy" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_anatomy" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d89e5d36d6c3e81b67cee20af8adaa64cdb69769 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_college_chemistry.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_chemistry" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_chemistry" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bd24ec782052fbdb2a18c6b271d7db7fd1eb0c21 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_college_computer_science.yaml @@ -0,0 +1,4 @@ +"dataset_name": "college_computer_science" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_college_computer_science" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_computer_security.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_computer_security.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6d0edec21fe23c1f38f17a80990f9af70779759 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_computer_security.yaml @@ -0,0 +1,4 @@ +"dataset_name": "computer_security" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_computer_security" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..afb9144da92bdc592e66f96133c538eb0c1829ef --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_elementary_mathematics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "elementary_mathematics" +"description": "فم بعملية التقييم في مجال ألعلوم وتقنية المعلومات و الرياضيات \n\n" +"include": "_default_template_yaml" +"task": "ammlu_elementary_mathematics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml 
b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f180cee343c1df1aed2b443db0f39aab519167c1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_geography.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_geography" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_geography" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88fe999fa3913051d3d43c3d8bbc7739d5567ff5 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_high_school_government_and_politics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "high_school_government_and_politics" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_high_school_government_and_politics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88c56754e2ca7d97312d975119467606d577b0db --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_medical_genetics.yaml @@ -0,0 +1,4 @@ +"dataset_name": "medical_genetics" +"description": "فم بعملية التقييم في مجال علوم أخرى \n\n" +"include": "_default_template_yaml" +"task": "ammlu_medical_genetics" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a01ffb2c4a29fd8bbf87edd08937ea7801681a56 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_moral_disputes.yaml @@ -0,0 +1,4 @@ +"dataset_name": "moral_disputes" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_moral_disputes" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_philosophy.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_philosophy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7211875da65c4f8e0c8b6d5b7554d27a24d42aef --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_philosophy.yaml @@ -0,0 +1,4 @@ +"dataset_name": "philosophy" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_philosophy" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_prehistory.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_prehistory.yaml new file mode 100644 index 0000000000000000000000000000000000000000..77a29ee976fadae33505888060195bdc496ca5a8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_prehistory.yaml @@ -0,0 +1,4 @@ +"dataset_name": "prehistory" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_prehistory" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_security_studies.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_security_studies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6f8b9e50fc3ea5e07c3cbd0800c9a7a440c0c2ff --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_security_studies.yaml @@ -0,0 +1,4 @@ +"dataset_name": "security_studies" +"description": "فم بعملية التقييم في مجال العلوم الإجتماعية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_security_studies" diff --git a/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_world_religions.yaml b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_world_religions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dc433e13d9da096bc16b3e64c1138bedc04b4813 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/ammlu/ammlu_world_religions.yaml @@ -0,0 +1,4 @@ +"dataset_name": "world_religions" +"description": "فم بعملية التقييم في مجال العلوم الانسانية \n\n" +"include": "_default_template_yaml" +"task": "ammlu_world_religions" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_acm_Arab.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_acm_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7439ce8adfa6127abf3381a1f193194e55826fcc --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_acm_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "acm_Arab" +"include": "_default_template_yaml" +"task": "belebele_acm_Arab" +"test_split": "acm_Arab" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ary_Arab.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ary_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fe00dd0342b89d99d860b9cc7bef2aad66cf5875 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ary_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ary_Arab" +"include": "_default_template_yaml" +"task": "belebele_ary_Arab" +"test_split": "ary_Arab" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6ad49a8eae0550ddd23ca51839c2d72b31031725 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ckb_Arab.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ckb_Arab" +"include": "_default_template_yaml" +"task": "belebele_ckb_Arab" +"test_split": "ckb_Arab" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_fra_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_fra_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c60fa9a7a90db3fc7f5451b39705a487acf18b29 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_fra_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "fra_Latn" +"include": "_default_template_yaml" +"task": "belebele_fra_Latn" +"test_split": "fra_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml new file mode 100644 index 0000000000000000000000000000000000000000..73b60502bc90b9e2128f9e2eb72046cf961b1054 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_jpn_Jpan.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "jpn_Jpan" +"include": "_default_template_yaml" +"task": "belebele_jpn_Jpan" +"test_split": "jpn_Jpan" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5303cdd976b5b1f2ebbe1f36e8661ab966a856b6 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_mlt_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "mlt_Latn" +"include": "_default_template_yaml" +"task": "belebele_mlt_Latn" +"test_split": "mlt_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..788d6959976320f5fb962e442aa8fa9c2ed9cca8 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_ssw_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "ssw_Latn" +"include": "_default_template_yaml" +"task": "belebele_ssw_Latn" +"test_split": "ssw_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sun_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sun_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e599beb49507b9566f0d0e77c20673cecfc84df --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_sun_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "sun_Latn" +"include": "_default_template_yaml" +"task": "belebele_sun_Latn" +"test_split": "sun_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zul_Latn.yaml b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zul_Latn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1e7fede97ca234d40a87acc7a0e21aaf659a2faf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/belebele/belebele_zul_Latn.yaml @@ -0,0 +1,4 @@ +"fewshot_split": "zul_Latn" +"include": "_default_template_yaml" +"task": "belebele_zul_Latn" +"test_split": "zul_Latn" diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/README.md b/lm-evaluation-harness/lm_eval/tasks/bigbench/README.md new file mode 100644 index 0000000000000000000000000000000000000000..be680eac2955668030b5e3ab90fdbf1ea4481c50 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/README.md @@ -0,0 +1,49 @@ +# BigBench + +### Paper + +Title: `Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models` + +Abstract: https://arxiv.org/abs/2206.04615 + +The Beyond the Imitation Game Benchmark (BIG-bench) is a collaborative benchmark intended to probe large language models and extrapolate their future capabilities. + +Homepage: https://github.com/google/BIG-bench + + +### Citation + +``` +@misc{srivastava2022imitation, + title={Beyond the Imitation Game: Quantifying and extrapolating the capabilities of language models}, + author={Aarohi Srivastava and Abhinav Rastogi and Abhishek Rao and Abu Awal Md Shoeb and Abubakar Abid and Adam Fisch and Adam R. Brown and Adam Santoro and Aditya Gupta and Adrià Garriga-Alonso and Agnieszka Kluska and Aitor Lewkowycz and Akshat Agarwal and Alethea Power and Alex Ray and Alex Warstadt and Alexander W. Kocurek and Ali Safaya and Ali Tazarv and Alice Xiang and Alicia Parrish and Allen Nie and Aman Hussain and Amanda Askell and Amanda Dsouza and Ambrose Slone and Ameet Rahane and Anantharaman S. Iyer and Anders Andreassen and Andrea Madotto and Andrea Santilli and Andreas Stuhlmüller and Andrew Dai and Andrew La and Andrew Lampinen and Andy Zou and Angela Jiang and Angelica Chen and Anh Vuong and Animesh Gupta and Anna Gottardi and Antonio Norelli and Anu Venkatesh and Arash Gholamidavoodi and Arfa Tabassum and Arul Menezes and Arun Kirubarajan and Asher Mullokandov and Ashish Sabharwal and Austin Herrick and Avia Efrat and Aykut Erdem and Ayla Karakaş and B. 
Ryan Roberts and Bao Sheng Loe and Barret Zoph and Bartłomiej Bojanowski and Batuhan Özyurt and Behnam Hedayatnia and Behnam Neyshabur and Benjamin Inden and Benno Stein and Berk Ekmekci and Bill Yuchen Lin and Blake Howald and Cameron Diao and Cameron Dour and Catherine Stinson and Cedrick Argueta and César Ferri Ramírez and Chandan Singh and Charles Rathkopf and Chenlin Meng and Chitta Baral and Chiyu Wu and Chris Callison-Burch and Chris Waites and Christian Voigt and Christopher D. Manning and Christopher Potts and Cindy Ramirez and Clara E. Rivera and Clemencia Siro and Colin Raffel and Courtney Ashcraft and Cristina Garbacea and Damien Sileo and Dan Garrette and Dan Hendrycks and Dan Kilman and Dan Roth and Daniel Freeman and Daniel Khashabi and Daniel Levy and Daniel Moseguí González and Danielle Perszyk and Danny Hernandez and Danqi Chen and Daphne Ippolito and Dar Gilboa and David Dohan and David Drakard and David Jurgens and Debajyoti Datta and Deep Ganguli and Denis Emelin and Denis Kleyko and Deniz Yuret and Derek Chen and Derek Tam and Dieuwke Hupkes and Diganta Misra and Dilyar Buzan and Dimitri Coelho Mollo and Diyi Yang and Dong-Ho Lee and Ekaterina Shutova and Ekin Dogus Cubuk and Elad Segal and Eleanor Hagerman and Elizabeth Barnes and Elizabeth Donoway and Ellie Pavlick and Emanuele Rodola and Emma Lam and Eric Chu and Eric Tang and Erkut Erdem and Ernie Chang and Ethan A. Chi and Ethan Dyer and Ethan Jerzak and Ethan Kim and Eunice Engefu Manyasi and Evgenii Zheltonozhskii and Fanyue Xia and Fatemeh Siar and Fernando Martínez-Plumed and Francesca Happé and Francois Chollet and Frieda Rong and Gaurav Mishra and Genta Indra Winata and Gerard de Melo and Germán Kruszewski and Giambattista Parascandolo and Giorgio Mariani and Gloria Wang and Gonzalo Jaimovitch-López and Gregor Betz and Guy Gur-Ari and Hana Galijasevic and Hannah Kim and Hannah Rashkin and Hannaneh Hajishirzi and Harsh Mehta and Hayden Bogar and Henry Shevlin and Hinrich Schütze and Hiromu Yakura and Hongming Zhang and Hugh Mee Wong and Ian Ng and Isaac Noble and Jaap Jumelet and Jack Geissinger and Jackson Kernion and Jacob Hilton and Jaehoon Lee and Jaime Fernández Fisac and James B. Simon and James Koppel and James Zheng and James Zou and Jan Kocoń and Jana Thompson and Jared Kaplan and Jarema Radom and Jascha Sohl-Dickstein and Jason Phang and Jason Wei and Jason Yosinski and Jekaterina Novikova and Jelle Bosscher and Jennifer Marsh and Jeremy Kim and Jeroen Taal and Jesse Engel and Jesujoba Alabi and Jiacheng Xu and Jiaming Song and Jillian Tang and Joan Waweru and John Burden and John Miller and John U. Balis and Jonathan Berant and Jörg Frohberg and Jos Rozen and Jose Hernandez-Orallo and Joseph Boudeman and Joseph Jones and Joshua B. Tenenbaum and Joshua S. Rule and Joyce Chua and Kamil Kanclerz and Karen Livescu and Karl Krauth and Karthik Gopalakrishnan and Katerina Ignatyeva and Katja Markert and Kaustubh D. 
Dhole and Kevin Gimpel and Kevin Omondi and Kory Mathewson and Kristen Chiafullo and Ksenia Shkaruta and Kumar Shridhar and Kyle McDonell and Kyle Richardson and Laria Reynolds and Leo Gao and Li Zhang and Liam Dugan and Lianhui Qin and Lidia Contreras-Ochando and Louis-Philippe Morency and Luca Moschella and Lucas Lam and Lucy Noble and Ludwig Schmidt and Luheng He and Luis Oliveros Colón and Luke Metz and Lütfi Kerem Şenel and Maarten Bosma and Maarten Sap and Maartje ter Hoeve and Maheen Farooqi and Manaal Faruqui and Mantas Mazeika and Marco Baturan and Marco Marelli and Marco Maru and Maria Jose Ramírez Quintana and Marie Tolkiehn and Mario Giulianelli and Martha Lewis and Martin Potthast and Matthew L. Leavitt and Matthias Hagen and Mátyás Schubert and Medina Orduna Baitemirova and Melody Arnaud and Melvin McElrath and Michael A. Yee and Michael Cohen and Michael Gu and Michael Ivanitskiy and Michael Starritt and Michael Strube and Michał Swędrowski and Michele Bevilacqua and Michihiro Yasunaga and Mihir Kale and Mike Cain and Mimee Xu and Mirac Suzgun and Mo Tiwari and Mohit Bansal and Moin Aminnaseri and Mor Geva and Mozhdeh Gheini and Mukund Varma T and Nanyun Peng and Nathan Chi and Nayeon Lee and Neta Gur-Ari Krakover and Nicholas Cameron and Nicholas Roberts and Nick Doiron and Nikita Nangia and Niklas Deckers and Niklas Muennighoff and Nitish Shirish Keskar and Niveditha S. Iyer and Noah Constant and Noah Fiedel and Nuan Wen and Oliver Zhang and Omar Agha and Omar Elbaghdadi and Omer Levy and Owain Evans and Pablo Antonio Moreno Casares and Parth Doshi and Pascale Fung and Paul Pu Liang and Paul Vicol and Pegah Alipoormolabashi and Peiyuan Liao and Percy Liang and Peter Chang and Peter Eckersley and Phu Mon Htut and Pinyu Hwang and Piotr Miłkowski and Piyush Patil and Pouya Pezeshkpour and Priti Oli and Qiaozhu Mei and Qing Lyu and Qinlang Chen and Rabin Banjade and Rachel Etta Rudolph and Raefer Gabriel and Rahel Habacker and Ramón Risco Delgado and Raphaël Millière and Rhythm Garg and Richard Barnes and Rif A. Saurous and Riku Arakawa and Robbe Raymaekers and Robert Frank and Rohan Sikand and Roman Novak and Roman Sitelew and Ronan LeBras and Rosanne Liu and Rowan Jacobs and Rui Zhang and Ruslan Salakhutdinov and Ryan Chi and Ryan Lee and Ryan Stovall and Ryan Teehan and Rylan Yang and Sahib Singh and Saif M. Mohammad and Sajant Anand and Sam Dillavou and Sam Shleifer and Sam Wiseman and Samuel Gruetter and Samuel R. Bowman and Samuel S. Schoenholz and Sanghyun Han and Sanjeev Kwatra and Sarah A. Rous and Sarik Ghazarian and Sayan Ghosh and Sean Casey and Sebastian Bischoff and Sebastian Gehrmann and Sebastian Schuster and Sepideh Sadeghi and Shadi Hamdan and Sharon Zhou and Shashank Srivastava and Sherry Shi and Shikhar Singh and Shima Asaadi and Shixiang Shane Gu and Shubh Pachchigar and Shubham Toshniwal and Shyam Upadhyay and Shyamolima and Debnath and Siamak Shakeri and Simon Thormeyer and Simone Melzi and Siva Reddy and Sneha Priscilla Makini and Soo-Hwan Lee and Spencer Torene and Sriharsha Hatwar and Stanislas Dehaene and Stefan Divic and Stefano Ermon and Stella Biderman and Stephanie Lin and Stephen Prasad and Steven T. Piantadosi and Stuart M. 
Shieber and Summer Misherghi and Svetlana Kiritchenko and Swaroop Mishra and Tal Linzen and Tal Schuster and Tao Li and Tao Yu and Tariq Ali and Tatsu Hashimoto and Te-Lin Wu and Théo Desbordes and Theodore Rothschild and Thomas Phan and Tianle Wang and Tiberius Nkinyili and Timo Schick and Timofei Kornev and Timothy Telleen-Lawton and Titus Tunduny and Tobias Gerstenberg and Trenton Chang and Trishala Neeraj and Tushar Khot and Tyler Shultz and Uri Shaham and Vedant Misra and Vera Demberg and Victoria Nyamai and Vikas Raunak and Vinay Ramasesh and Vinay Uday Prabhu and Vishakh Padmakumar and Vivek Srikumar and William Fedus and William Saunders and William Zhang and Wout Vossen and Xiang Ren and Xiaoyu Tong and Xinran Zhao and Xinyi Wu and Xudong Shen and Yadollah Yaghoobzadeh and Yair Lakretz and Yangqiu Song and Yasaman Bahri and Yejin Choi and Yichi Yang and Yiding Hao and Yifu Chen and Yonatan Belinkov and Yu Hou and Yufang Hou and Yuntao Bai and Zachary Seid and Zhuoye Zhao and Zijian Wang and Zijie J. Wang and Zirui Wang and Ziyi Wu}, + year={2022}, + eprint={2206.04615}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* `group_name`: `Short description` + +#### Tasks + +* `task_name`: `1-sentence description of what this particular task does` +* `task_name2`: ... + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
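Each of the per-subtask YAML stubs added in this diff (the ammlu and belebele files above, and the BIG-bench ones generated below) defers its shared settings to a template file through an `include` key and overrides only the subtask-specific fields such as `task` and `dataset_name`. The harness resolves `include` itself when it loads task configs; the snippet below is only a minimal sketch of that merge semantics, with a hypothetical stub path, useful for inspecting what a stub expands to.

```python
# Minimal sketch (not the harness's own loader): expand a task stub by
# merging it over the template named in its `include` key; the stub's own
# keys take precedence. The path used in __main__ is a hypothetical example.
import os

import yaml


def resolve_stub(stub_path: str) -> dict:
    with open(stub_path, "r", encoding="utf-8") as f:
        config = yaml.safe_load(f)
    include = config.pop("include", None)
    if include:
        # `include` is interpreted relative to the stub's own directory.
        template_path = os.path.join(os.path.dirname(stub_path), include)
        with open(template_path, "r", encoding="utf-8") as f:
            template = yaml.safe_load(f) or {}
        # Stub values override template values.
        config = {**template, **config}
    return config


if __name__ == "__main__":
    expanded = resolve_stub("multiple_choice/snarks.yaml")  # hypothetical path
    print(expanded["task"], expanded["dataset_path"], expanded["dataset_name"])
```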
diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_tasks.py b/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..169c66465559864117e3b991ea5b868d87d2c78d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_tasks.py @@ -0,0 +1,206 @@ +import os + +import yaml + + +all_subtasks = [ + "abstract_narrative_understanding", + "anachronisms", + "analogical_similarity", + "analytic_entailment", + "arithmetic", + "ascii_word_recognition", + "authorship_verification", + "auto_categorization", + "auto_debugging", + "bbq_lite_json", + "bridging_anaphora_resolution_barqa", + "causal_judgment", + "cause_and_effect", + "checkmate_in_one", + "chess_state_tracking", + "chinese_remainder_theorem", + "cifar10_classification", + "code_line_description", + "codenames", + "color", + "common_morpheme", + "conceptual_combinations", + "conlang_translation", + "contextual_parametric_knowledge_conflicts", + "crash_blossom", + "crass_ai", + "cryobiology_spanish", + "cryptonite", + "cs_algorithms", + "dark_humor_detection", + "date_understanding", + "disambiguation_qa", + "discourse_marker_prediction", + "disfl_qa", + "dyck_languages", + "elementary_math_qa", + "emoji_movie", + "emojis_emotion_prediction", + "empirical_judgments", + "english_proverbs", + "english_russian_proverbs", + "entailed_polarity", + "entailed_polarity_hindi", + "epistemic_reasoning", + "evaluating_information_essentiality", + "fact_checker", + "fantasy_reasoning", + "few_shot_nlg", + "figure_of_speech_detection", + "formal_fallacies_syllogisms_negation", + "gem", + "gender_inclusive_sentences_german", + "general_knowledge", + "geometric_shapes", + "goal_step_wikihow", + "gre_reading_comprehension", + "hhh_alignment", + "hindi_question_answering", + "hindu_knowledge", + "hinglish_toxicity", + "human_organs_senses", + "hyperbaton", + "identify_math_theorems", + "identify_odd_metaphor", + "implicatures", + "implicit_relations", + "intent_recognition", + "international_phonetic_alphabet_nli", + "international_phonetic_alphabet_transliterate", + "intersect_geometry", + "irony_identification", + "kanji_ascii", + "kannada", + "key_value_maps", + "known_unknowns", + "language_games", + "language_identification", + "linguistic_mappings", + "linguistics_puzzles", + "list_functions", + "logic_grid_puzzle", + "logical_args", + "logical_deduction", + "logical_fallacy_detection", + "logical_sequence", + "mathematical_induction", + "matrixshapes", + "metaphor_boolean", + "metaphor_understanding", + "minute_mysteries_qa", + "misconceptions", + "misconceptions_russian", + "mnist_ascii", + "modified_arithmetic", + "moral_permissibility", + "movie_dialog_same_or_different", + "movie_recommendation", + "mult_data_wrangling", + "multiemo", + "natural_instructions", + "navigate", + "nonsense_words_grammar", + "novel_concepts", + "object_counting", + "odd_one_out", + "operators", + "paragraph_segmentation", + "parsinlu_qa", + "parsinlu_reading_comprehension", + "penguins_in_a_table", + "periodic_elements", + "persian_idioms", + "phrase_relatedness", + "physical_intuition", + "physics", + "physics_questions", + "play_dialog_same_or_different", + "polish_sequence_labeling", + "presuppositions_as_nli", + "qa_wikidata", + "question_selection", + "real_or_fake_text", + "reasoning_about_colored_objects", + "repeat_copy_logic", + "rephrase", + "riddle_sense", + "ruin_names", + "salient_translation_error_detection", + "scientific_press_release", + 
"semantic_parsing_in_context_sparc", + "semantic_parsing_spider", + "sentence_ambiguity", + "similarities_abstraction", + "simp_turing_concept", + "simple_arithmetic_json", + "simple_arithmetic_json_multiple_choice", + "simple_arithmetic_json_subtasks", + "simple_arithmetic_multiple_targets_json", + "simple_ethical_questions", + "simple_text_editing", + "snarks", + "social_iqa", + "social_support", + "sports_understanding", + "strange_stories", + "strategyqa", + "sufficient_information", + "suicide_risk", + "swahili_english_proverbs", + "swedish_to_german_proverbs", + "symbol_interpretation", + "temporal_sequences", + "tense", + "timedial", + "topical_chat", + "tracking_shuffled_objects", + "understanding_fables", + "undo_permutation", + "unit_conversion", + "unit_interpretation", + "unnatural_in_context_learning", + "vitaminc_fact_verification", + "what_is_the_tao", + "which_wiki_edit", + "winowhy", + "word_sorting", + "word_unscrambling", +] + + +def main() -> None: + for path, task_type in zip( + ["multiple_choice", "generate_until"], + ["multiple_choice_template_yaml", "generate_until_template_yaml"], + ): + os.makedirs(path, exist_ok=True) + for task in all_subtasks: + file_name = f"{task}.yaml" + try: + with open(f"{path}/{file_name}", "w", encoding="utf-8") as f: + f.write("# Generated by utils.py\n") + yaml.dump( + { + "include": f"../{task_type}", + "task": "bigbench_" + + task + + "_{}".format(task_type.split("_template_yaml")[0]), + "dataset_name": task + + "_zero_shot", # zero-shot version of the dataset + }, + f, + width=float("inf"), + allow_unicode=True, + ) + except FileExistsError: + pass + + +if __name__ == "__main__": + main() diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until_template_yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..25593a4ef62ae7c8ecd4a3d9bd3de4847cf4d267 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until_template_yaml @@ -0,0 +1,18 @@ +group: bigbench_generate_until +dataset_path: hails/bigbench +output_type: generate_until +dataset_kwargs: + # num_shots: 0 # TODO: num of shots for `bigbench` HF dataset should be controlled through this, not through the typical methods + # subtask_name: null +test_split: default +doc_to_text: inputs +doc_to_target: "{{targets[0]}}" +generation_kwargs: + max_gen_toks: 128 +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_punctuation: true +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e20092e9dce7594545786eb54ed587813158ba4 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/analogical_similarity.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: analogical_similarity_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_analogical_similarity_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml new file mode 100644 index 0000000000000000000000000000000000000000..254f115b6517f22ead0c74870cb835299c3f6130 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/ascii_word_recognition.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: ascii_word_recognition_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_ascii_word_recognition_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c4be304435bac358b7ddb732f60605ab029ed82 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/bbq_lite_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: bbq_lite_json_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_bbq_lite_json_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3bf9d9bf56702e0e52e53849cafc874dd6588778 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/contextual_parametric_knowledge_conflicts.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: contextual_parametric_knowledge_conflicts_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_contextual_parametric_knowledge_conflicts_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2827232a601ebcd2eb217df8c4ff2dde3542fc2d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/disambiguation_qa.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: disambiguation_qa_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_disambiguation_qa_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0604d97d834d15a4db5adc57b4d1240cabacbb33 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/emoji_movie.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: emoji_movie_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_emoji_movie_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c35581af46c580527af70b12aebe60aa808181d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/epistemic_reasoning.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: epistemic_reasoning_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_epistemic_reasoning_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2fef6d9301484a42a5a4cd26f2df0dd241b0d104 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/human_organs_senses.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: human_organs_senses_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_human_organs_senses_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a19ff99e55b6c61967b850dc0e356d0d474dc8fb --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/irony_identification.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: irony_identification_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_irony_identification_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a90a82860909a072cbc0b9ff431439d5b9488b94 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/kanji_ascii.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: kanji_ascii_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_kanji_ascii_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7c476c4eb9b24b918a97c1e88943b862209db85d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/metaphor_boolean.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: metaphor_boolean_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_metaphor_boolean_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4874dd155bbffe0b3e749583d8d989dd548ea537 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/natural_instructions.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: natural_instructions_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_natural_instructions_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..277d843d7cd330a599c3cc33cedd03a40c671786 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/object_counting.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: object_counting_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_object_counting_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c797aec6e6d3f781b6a6882178f7ff34eb922a04 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/phrase_relatedness.yaml @@ -0,0 +1,4 @@ +# Generated 
by utils.py +dataset_name: phrase_relatedness_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_phrase_relatedness_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml new file mode 100644 index 0000000000000000000000000000000000000000..263d61ebe60d1f8ac22a119b6ead38df0d2dc03b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/qa_wikidata.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: qa_wikidata_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_qa_wikidata_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d930e7419a4175762e8cacf1f5297cc4424dd0d6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/salient_translation_error_detection.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: salient_translation_error_detection_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_salient_translation_error_detection_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a988e54c51380f004e22cc303812e192c8291328 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/semantic_parsing_spider.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: semantic_parsing_spider_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_semantic_parsing_spider_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml new file mode 100644 index 0000000000000000000000000000000000000000..685ec17c1ad672bc07df05bb140c1400043bc2d6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_multiple_targets_json.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_arithmetic_multiple_targets_json_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_simple_arithmetic_multiple_targets_json_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml new file mode 100644 index 0000000000000000000000000000000000000000..13b67888cd767063dc1c0ceeceeb92c256b54a7d --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_text_editing.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: simple_text_editing_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_simple_text_editing_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3e79f1ce10d385b187ca5f0ef4516c77dae291b3 --- /dev/null +++ 
b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/snarks.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: snarks_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_snarks_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f9aa366b7abcc27a0efcbc825068c0fdfdd4c929 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/tracking_shuffled_objects.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: tracking_shuffled_objects_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_tracking_shuffled_objects_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f7e1feb0525496ba8603edb58910f4522b06933c --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/undo_permutation.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: undo_permutation_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_undo_permutation_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3dbfb0305efb863f6d698d7f3acedadb320d9a63 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/which_wiki_edit.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: which_wiki_edit_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_which_wiki_edit_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98bc6e4b23a75abd1a4a560260b88a95034e1f0b --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/winowhy.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: winowhy_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_winowhy_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml new file mode 100644 index 0000000000000000000000000000000000000000..71e79ae36353d0ca44548b60dfc0d623c4584edf --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/word_sorting.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: word_sorting_zero_shot +include: ../multiple_choice_template_yaml +task: bigbench_word_sorting_multiple_choice diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice_template_yaml b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice_template_yaml new file mode 100644 index 0000000000000000000000000000000000000000..10fce5c1c36738122508a36ce2a70f84718aabea --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice_template_yaml @@ -0,0 +1,15 @@ +group: bigbench_multiple_choice +dataset_path: hails/bigbench +dataset_kwargs: + # num_shots: 0 # TODO: num of shots 
for `bigbench` HF dataset should be controlled through this, not through the typical methods + # subtask_name: null +output_type: multiple_choice +test_split: default +doc_to_text: inputs +doc_to_target: "{{multiple_choice_targets.index(targets[0])}}" +doc_to_choice: "{{multiple_choice_targets}}" +metric_list: + - metric: acc + # TODO: brier score and other metrics +metadata: + version: 0.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/bigbench/push_bigbench_dataset.py b/lm-evaluation-harness/lm_eval/tasks/bigbench/push_bigbench_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..44577fa5d41fae16cbbc1936d036a8aa0bf148f2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/bigbench/push_bigbench_dataset.py @@ -0,0 +1,31 @@ +""" +A utility script that pushes all Bigbench subtasks from their form in the `bigbench` HF dataset +into `{org name}/bigbench`. + +Prior to running, log into HF Hub for the target HF hub org via `huggingface-cli login`. + +Requires the installation of +`pip install "bigbench @ https://storage.googleapis.com/public_research_data/bigbench/bigbench-0.0.1.tar.gz"` +and is included so that the bigbench dependency can be avoided. +""" +import bigbench.api.util as bb_utils +import datasets +from tqdm import tqdm + + +all_task_names = bb_utils.get_all_json_task_names() + +num_shots = [0] + +for shots in num_shots: + for task_name in tqdm(all_task_names): + try: + print(f"Loading '{task_name}' with num_shots={shots}...") + task_ds = datasets.load_dataset("bigbench", name=task_name, num_shots=shots) + + print(f"Pushing '{task_name}' with num_shots={shots}...") + task_ds.push_to_hub("hails/bigbench", task_name + "_zero_shot") + + del task_ds + except Exception as e: + raise e diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml new file mode 100644 index 0000000000000000000000000000000000000000..88316e512a4dabff3e550f84f3401216316991a7 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehuaux.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehuaux +include: eus_exams_es +task: eus_exams_es_opeehuaux diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml new file mode 100644 index 0000000000000000000000000000000000000000..96cc86b402af1777e075530b3258e3a9089d539f --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehusubalterno.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehusubalterno +include: eus_exams_es +task: eus_exams_es_opeehusubalterno diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0641fc2e7766d5f93ba1c45c83761f6e5b57560a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnico.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehutecnico +include: eus_exams_es +task: eus_exams_es_opeehutecnico diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a338a1ab0d542368acf179b0611a354ddb71d293 
--- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeehutecnicob.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeehutecnicob +include: eus_exams_es +task: eus_exams_es_opeehutecnicob diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml new file mode 100644 index 0000000000000000000000000000000000000000..85c771cdb3ead8511963b811043891958f19e340 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiadmin.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakiadmin +include: eus_exams_es +task: eus_exams_es_opeosakiadmin diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d61825b0beac1f50137ff42d74b6b649f30ea4e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakiaux.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakiaux +include: eus_exams_es +task: eus_exams_es_opeosakiaux diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4749cac111d410d6b573a5365ec6778ec4645f2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_opeosakienf.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_opeosakienf +include: eus_exams_es +task: eus_exams_es_opeosakienf diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1fc30ce353f83ccc717a504a50a7bd611f76e6c6 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza7c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza7c +include: eus_exams_es +task: eus_exams_es_osakidetza7c diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml new file mode 100644 index 0000000000000000000000000000000000000000..38f7ee3c39af34bc0516780a8717172950fc955a --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_es_osakidetza8c.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: es_osakidetza8c +include: eus_exams_es +task: eus_exams_es_osakidetza8c diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5630ddb05864cd3d6031ea8fed96e9715fb8990 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_ejadministrari.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_ejadministrari +include: eus_exams_eu +task: eus_exams_eu_ejadministrari diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0ff2ab853fc839c5ae2b88520767b8b3d4a60f4d --- 
/dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opeehubiblioeu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeehubiblioeu +include: eus_exams_eu +task: eus_exams_eu_opeehubiblioeu diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e9211f39a162360b67e84399409b1617bc5cc1dd --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opegasteizkoudala.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opegasteizkoudala +include: eus_exams_eu +task: eus_exams_eu_opegasteizkoudala diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cf19e09941bd0c7bb10db7f5398fb2398f1a0fd2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_opeosakiadmineu.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_opeosakiadmineu +include: eus_exams_eu +task: eus_exams_eu_opeosakiadmineu diff --git a/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml new file mode 100644 index 0000000000000000000000000000000000000000..218dc87cb8affc37cc54e03d56bcf44213381e99 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/eus_exams/eus_exams_eu_osakidetza2e.yaml @@ -0,0 +1,4 @@ +# Generated by utils.py +dataset_name: eu_osakidetza2e +include: eus_exams_eu +task: eus_exams_eu_osakidetza2e diff --git a/lm-evaluation-harness/lm_eval/tasks/fld/README.md b/lm-evaluation-harness/lm_eval/tasks/fld/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1c7d88e3df69a6690c9da2c897cdf0b3d7311e05 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/fld/README.md @@ -0,0 +1,64 @@ +# FLD + +### Paper + +Title: Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic + +Abstract: https://arxiv.org/abs/2308.07336 + +**FLD** (**F**ormal **L**ogic **D**eduction) is a deductive reasoning benchmark. +Given a set of facts and a hypothesis, an LLM is required to generate (i) proof steps to (dis-)prove the hypothesis, and (ii) an answer ("proved", "disproved", or "unknown"). + +Unique features of FLD are: +* It assesses the model's logical reasoning ability *isolated from knowledge*, as the facts are randomly constructed so that referring to existing knowledge never helps solve the task. +* It assesses diverse reasoning patterns (i.e., deduction rules), as it is based on formal logic theory. +* As a result, it is highly challenging. Indeed, even GPT-4 can solve only about half of the problems. 
+ +Homepage: https://github.com/hitachi-nlp/FLD + + +### Citation + +``` +@InProceedings{pmlr-v202-morishita23a, + title = {Learning Deductive Reasoning from Synthetic Corpus based on Formal Logic}, + author = {Morishita, Terufumi and Morio, Gaku and Yamaguchi, Atsuki and Sogawa, Yasuhiro}, + booktitle = {Proceedings of the 40th International Conference on Machine Learning}, + pages = {25254--25274}, + year = {2023}, + editor = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan}, + volume = {202}, + series = {Proceedings of Machine Learning Research}, + month = {23--29 Jul}, + publisher = {PMLR}, + pdf = {https://proceedings.mlr.press/v202/morishita23a/morishita23a.pdf}, + url = {https://proceedings.mlr.press/v202/morishita23a.html}, +} +``` + +### Groups and Tasks + +#### Groups + +* `fld` + +#### Tasks + +This release is the simplified version of FLD where a model is required to predict only an answer. +This setting is described by "answer accuracy" in the original paper. + +* `fld_default` is a basic task based on [FLD.v2](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star) +* `fld_star` is a more challenging version based on [FLD.v2-star](https://huggingface.co/datasets/hitachi-nlp/FLD.v2/viewer/star) + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [x] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/lm-evaluation-harness/lm_eval/tasks/fld/fld_default.yaml b/lm-evaluation-harness/lm_eval/tasks/fld/fld_default.yaml new file mode 100644 index 0000000000000000000000000000000000000000..233a3564a3ffb6d207dd397103a27bd37c43dc22 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/fld/fld_default.yaml @@ -0,0 +1,21 @@ +group: + - fld +task: fld_default +dataset_path: hitachi-nlp/FLD.v2 +dataset_name: default +training_split: train +validation_split: validation +test_split: test +doc_to_text: "Based on the provided facts ($context$), either prove or disprove the hypothesis or state that it is unknown. 
{{prompt_serial}}" +doc_to_target: world_assump_label +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/fld/fld_star.yaml b/lm-evaluation-harness/lm_eval/tasks/fld/fld_star.yaml new file mode 100644 index 0000000000000000000000000000000000000000..750e808c780001e4659c9def75400f8a2460045e --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/fld/fld_star.yaml @@ -0,0 +1,3 @@ +include: fld_default.yaml +task: fld_star +dataset_name: star diff --git a/lm-evaluation-harness/lm_eval/tasks/qasper/README.md b/lm-evaluation-harness/lm_eval/tasks/qasper/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ada111e1ca7b0df493182939960559bdeb96b9f2 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qasper/README.md @@ -0,0 +1,63 @@ +# QASPER + +### Paper + +Title: `A Dataset of Information-Seeking Questions and Answers Anchored in Research Papers` + +Abstract: https://arxiv.org/abs/2105.03011 + +QASPER is a dataset of 5,049 questions over 1,585 Natural Language Processing papers. +Each question is written by an NLP practitioner who read only the title and abstract +of the corresponding paper, and the question seeks information present in the full +text. The questions are then answered by a separate set of NLP practitioners who also +provide supporting evidence to answers. + +Homepage: https://allenai.org/data/qasper + +### Citation + +``` +@article{DBLP:journals/corr/abs-2105-03011, + author = {Pradeep Dasigi and + Kyle Lo and + Iz Beltagy and + Arman Cohan and + Noah A. Smith and + Matt Gardner}, + title = {A Dataset of Information-Seeking Questions and Answers Anchored in + Research Papers}, + journal = {CoRR}, + volume = {abs/2105.03011}, + year = {2021}, + url = {https://arxiv.org/abs/2105.03011}, + eprinttype = {arXiv}, + eprint = {2105.03011}, + timestamp = {Fri, 14 May 2021 12:13:30 +0200}, + biburl = {https://dblp.org/rec/journals/corr/abs-2105-03011.bib}, + bibsource = {dblp computer science bibliography, https://dblp.org} +} +``` + +### Groups and Tasks + +#### Groups + +* `qasper`: executes both `qasper_bool` and `qasper_freeform` + +#### Tasks + +* `qasper_bool`: Multiple choice task that evaluates the task with `answer_type="bool"` +* `qasper_freeform`: Greedy generation task that evaluates the samples from the task with `answer_type="free form answer"` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? 
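The QASPER README above defines a `qasper` group with a multiple-choice variant (`qasper_bool`, scored with `f1`) and a greedy-generation variant (`qasper_freeform`, scored with the `f1_abstractive` metric defined below). As a rough usage sketch, both variants can be run together through the harness's Python entry point; the exact `simple_evaluate` signature can differ between lm-evaluation-harness versions, and the checkpoint named here is only a placeholder.

```python
# Hedged sketch: evaluate both QASPER variants in one call. The keyword
# arguments follow lm-evaluation-harness's `simple_evaluate` API; the model
# checkpoint is an arbitrary placeholder, not a recommendation.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",  # placeholder checkpoint
    tasks=["qasper_bool", "qasper_freeform"],
    num_fewshot=0,
    batch_size=8,
)
print(results["results"])  # per-task f1 / f1_abstractive scores
```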
diff --git a/lm-evaluation-harness/lm_eval/tasks/qasper/bool.yaml b/lm-evaluation-harness/lm_eval/tasks/qasper/bool.yaml new file mode 100644 index 0000000000000000000000000000000000000000..17d3f1be983043ac2ca93038ed29e94c90028592 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qasper/bool.yaml @@ -0,0 +1,14 @@ +group: qasper +task: qasper_bool +dataset_path: allenai/qasper +output_type: multiple_choice +training_split: train +validation_split: validation +process_docs: !function utils.process_docs_bool +doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:" +doc_to_target: 1 +doc_to_choice: ["no", "yes"] +metric_list: + - metric: f1 +metadata: + version: 1.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/qasper/freeform.yaml b/lm-evaluation-harness/lm_eval/tasks/qasper/freeform.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ed7a4bc47274f09eb0f52df04723a011e2db13f0 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qasper/freeform.yaml @@ -0,0 +1,18 @@ +group: qasper +task: qasper_freeform +dataset_path: allenai/qasper +output_type: generate_until +training_split: train +validation_split: validation +process_docs: !function utils.process_docs_freeform +doc_to_text: "TITLE: {{title}}\nABSTRACT: {{abstract}}\n\nQ: {{question}}\n\nA:" +doc_to_target: answer +generation_kwargs: + until: + - "\n" +metric_list: + - metric: !function metrics.f1_abstractive + aggregation: mean + higher_is_better: true +metadata: + version: 2.0 diff --git a/lm-evaluation-harness/lm_eval/tasks/qasper/metrics.py b/lm-evaluation-harness/lm_eval/tasks/qasper/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..cc832912250ae45a4637daaac3f278d0da654ce1 --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qasper/metrics.py @@ -0,0 +1,41 @@ +import re +import string +from collections import Counter + + +def normalize_answer(s): + """ + Taken from the official evaluation script for v1.1 of the SQuAD dataset. + Lower text and remove punctuation, articles and extra whitespace. + """ + + def remove_articles(text): + return re.sub(r"\b(a|an|the)\b", " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + + +def f1_abstractive(predictions, references): + """ + Taken from the official evaluation script for v1.1 of the SQuAD dataset. 
+ """ + prediction_tokens = normalize_answer(predictions[0]).split() + references_tokens = normalize_answer(references[0]).split() + common = Counter(prediction_tokens) & Counter(references_tokens) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction_tokens) + recall = 1.0 * num_same / len(references_tokens) + f1 = (2 * precision * recall) / (precision + recall) + return f1 diff --git a/lm-evaluation-harness/lm_eval/tasks/qasper/utils.py b/lm-evaluation-harness/lm_eval/tasks/qasper/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fb3d4c55cf7e16a1d2c527510b8ae48d0d3b05fa --- /dev/null +++ b/lm-evaluation-harness/lm_eval/tasks/qasper/utils.py @@ -0,0 +1,72 @@ +from functools import partial + +from datasets import Dataset + + +def process_docs(dataset, set_answer_type="bool"): + FEATURES = ["title", "abstract", "question", "answer", "answer_type"] + + def _categorise_answer(answer_blob): + if answer_blob["unanswerable"]: + answer = "unanswerable" + answer_type = "unanswerable" + return answer, answer_type + elif answer_blob["yes_no"]: + answer = "yes" + answer_type = "bool" + return answer, answer_type + elif answer_blob["free_form_answer"]: + answer = answer_blob["free_form_answer"] + answer_type = "free form answer" + return answer, answer_type + elif answer_blob["extractive_spans"]: + answer = answer_blob["extractive_spans"] + answer_type = "extractive_spans" + return answer, answer_type + elif answer_blob["yes_no"] is False: + answer = "no" + answer_type = "bool" + return answer, answer_type + + def _flatten(doc): + """Given a `doc`, flatten it out so that each JSON blob + contains exactly one question and one answer. Logic taken from + the reference implementation available at + https://github.com/allenai/qasper-led-baseline/blob/main/scripts/evaluator.py + """ + obs_list = { + "title": [], + "abstract": [], + "question": [], + "answer": [], + "answer_type": [], + } + title = doc.pop("title") + abstract = doc.pop("abstract") + for question, answer_list in zip(doc["qas"]["question"], doc["qas"]["answers"]): + for answer_blob in answer_list["answer"]: + answer, answer_type = _categorise_answer(answer_blob) + if answer_type == set_answer_type: + obs_list["title"].append(title) + obs_list["abstract"].append(abstract) + obs_list["question"].append(question) + obs_list["answer_type"].append(answer_type) + if isinstance(answer, list): + answer = ", ".join(answer) + obs_list["answer"].append(answer) + + return obs_list + + dataset = dataset.map( + _flatten, + remove_columns=[key for key in dataset.features.keys() if key not in FEATURES], + ) + new_dataset = {} + for key in dataset.features.keys(): + new_dataset[key] = [x for row in dataset[key] for x in row] + + return Dataset.from_dict(new_dataset) + + +process_docs_bool = partial(process_docs, set_answer_type="bool") +process_docs_freeform = partial(process_docs, set_answer_type="free form answer")
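For a quick, self-contained check of the helpers above, the following sketch exercises `process_docs_bool` and `f1_abstractive` directly. It assumes it is run from `lm_eval/tasks/qasper/` so that `utils` and `metrics` import as plain modules, and the toy paper and answer strings are invented for illustration.

```
from datasets import Dataset

from metrics import f1_abstractive, normalize_answer
from utils import process_docs_bool

# A single fake paper in the nested allenai/qasper layout consumed by _flatten:
# one question carrying one yes/no answer annotation.
doc = {
    "title": "A Toy Paper",
    "abstract": "We study toy problems.",
    "qas": {
        "question": ["Does the paper use a BiLSTM encoder?"],
        "answers": [
            {
                "answer": [
                    {
                        "unanswerable": False,
                        "yes_no": True,
                        "free_form_answer": "",
                        "extractive_spans": [],
                    }
                ]
            }
        ],
    },
}

# Flatten into one row per (question, answer) pair, keeping only bool answers.
flat = process_docs_bool(Dataset.from_list([doc]))
print(flat[0]["question"], "->", flat[0]["answer"])      # -> ... -> yes

# Token-overlap F1 used by qasper_freeform: normalization drops case,
# punctuation, and articles before counting shared tokens.
pred, ref = "The model uses a BiLSTM encoder.", "a BiLSTM encoder"
print(normalize_answer(pred))                    # model uses bilstm encoder
print(round(f1_abstractive([pred], [ref]), 3))   # 2 shared tokens -> F1 = 0.667
```

Note that running the same construction through `process_docs_freeform` would drop this example, since its single annotation is categorised as `bool` rather than `free form answer`.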