Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- lm-evaluation/CITATION.bib +10 -0
- lm-evaluation/CODEOWNERS +1 -0
- lm-evaluation/LICENSE.md +21 -0
- lm-evaluation/README.md +440 -0
- lm-evaluation/eval.sh +5 -0
- lm-evaluation/ignore.txt +8 -0
- lm-evaluation/lm_eval/__init__.py +4 -0
- lm-evaluation/lm_eval/evaluator.py +583 -0
- lm-evaluation/lm_eval/evaluator_utils.py +312 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_education.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml +4 -0
- lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/README.md +101 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_age.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_nationality.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml +3 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_age.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_physical_appearance.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_race_color.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_socioeconomic.yaml +4 -0
- lm-evaluation/lm_eval/tasks/crows_pairs/utils.py +64 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_as.yaml +25 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_hi.yaml +25 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_kn.yaml +25 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_ml.yaml +25 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_or.yaml +25 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_pa.yaml +25 -0
- lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_ta.yaml +25 -0
lm-evaluation/CITATION.bib
ADDED
@@ -0,0 +1,10 @@
@misc{eval-harness,
  author       = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
  title        = {A framework for few-shot language model evaluation},
  month        = 12,
  year         = 2023,
  publisher    = {Zenodo},
  version      = {v0.4.0},
  doi          = {10.5281/zenodo.10256836},
  url          = {https://zenodo.org/records/10256836}
}
lm-evaluation/CODEOWNERS
ADDED
@@ -0,0 +1 @@
* @haileyschoelkopf @lintangsutawika
lm-evaluation/LICENSE.md
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2020 EleutherAI

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
lm-evaluation/README.md
ADDED
@@ -0,0 +1,440 @@
# Language Model Evaluation Harness

[DOI](https://doi.org/10.5281/zenodo.10256836)

## Announcement
**A new v0.4.0 release of lm-evaluation-harness is available!**

New updates and features include:

- Internal refactoring
- Config-based task creation and configuration
- Easier import and sharing of externally-defined task config YAMLs
- Support for Jinja2 prompt design, easy modification of prompts + prompt imports from Promptsource
- More advanced configuration options, including output post-processing, answer extraction, and multiple LM generations per document, configurable fewshot settings, and more
- Speedups and new modeling libraries supported, including: faster data-parallel HF model usage, vLLM support, MPS support with HuggingFace, and more
- Logging and usability changes
- New tasks including CoT BIG-Bench-Hard, Belebele, user-defined task groupings, and more

Please see our updated documentation pages in `docs/` for more details.

Development will be continuing on the `main` branch, and we encourage you to give us feedback on what features are desired and how to improve the library further, or ask questions, either in issues or PRs on GitHub, or in the [EleutherAI discord](https://discord.gg/eleutherai)!

## Overview

This project provides a unified framework to test generative language models on a large number of different evaluation tasks.

**Features:**
- Over 60 standard academic benchmarks for LLMs, with hundreds of subtasks and variants implemented.
- Support for models loaded via [transformers](https://github.com/huggingface/transformers/) (including quantization via [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ)), [GPT-NeoX](https://github.com/EleutherAI/gpt-neox), and [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/), with a flexible tokenization-agnostic interface.
- Support for fast and memory-efficient inference with [vLLM](https://github.com/vllm-project/vllm).
- Support for commercial APIs including [OpenAI](https://openai.com) and [TextSynth](https://textsynth.com/).
- Support for evaluation on adapters (e.g. LoRA) supported in [HuggingFace's PEFT library](https://github.com/huggingface/peft).
- Support for local models and benchmarks.
- Evaluation with publicly available prompts ensures reproducibility and comparability between papers.
- Easy support for custom prompts and evaluation metrics.

The Language Model Evaluation Harness is the backend for 🤗 Hugging Face's popular [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), has been used in [hundreds of papers](https://scholar.google.com/scholar?oi=bibs&hl=en&authuser=2&cites=15052937328817631261,4097184744846514103,1520777361382155671,17476825572045927382,18443729326628441434,14801318227356878622,7890865700763267262,12854182577605049984,15641002901115500560,5104500764547628290), and is used internally by dozens of organizations including NVIDIA, Cohere, BigScience, BigCode, Nous Research, and Mosaic ML.

## Install

To install the `lm-eval` package from the github repository, run:

```bash
git clone https://github.com/EleutherAI/lm-evaluation-harness
cd lm-evaluation-harness
pip install -e .
```

We also provide a number of optional dependencies for extended functionality. A detailed table is available at the end of this document.
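
As a quick illustration (using the `pip install -e ".[NAME]"` syntax described in the Optional Extras table at the end of this README; the choice of the `vllm` extra here is just an example), an install with one extra enabled might look like:

```bash
# editable install with the "vllm" extra; any name from the Optional Extras table can be substituted
pip install -e ".[vllm]"
```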

## Basic Usage

### Hugging Face `transformers`

To evaluate a model hosted on the [HuggingFace Hub](https://huggingface.co/models) (e.g. GPT-J-6B) on `hellaswag`, you can use the following command (this assumes you are using a CUDA-compatible GPU):

```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/gpt-j-6B \
    --tasks hellaswag \
    --device cuda:0 \
    --batch_size 8
```

Additional arguments can be provided to the model constructor using the `--model_args` flag. Most notably, this supports the common practice of using the `revisions` feature on the Hub to store partially trained checkpoints, or specifying the datatype for running a model:

```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
    --tasks lambada_openai,hellaswag \
    --device cuda:0 \
    --batch_size 8
```

Models that are loaded via both `transformers.AutoModelForCausalLM` (autoregressive, decoder-only GPT style models) and `transformers.AutoModelForSeq2SeqLM` (such as encoder-decoder models like T5) in Huggingface are supported.

Batch size selection can be automated by setting the `--batch_size` flag to `auto`. This will perform automatic detection of the largest batch size that will fit on your device. On tasks where there is a large difference between the longest and shortest example, it can be helpful to periodically recompute the largest batch size, to gain a further speedup. To do this, append `:N` to the above flag to automatically recompute the largest batch size `N` times. For example, to recompute the batch size 4 times, the command would be:

```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/pythia-160m,revision=step100000,dtype="float" \
    --tasks lambada_openai,hellaswag \
    --device cuda:0 \
    --batch_size auto:4
```

The full list of supported arguments is provided [here](./docs/interface.md), and on the terminal by calling `lm_eval -h`. Alternatively, you can use `lm-eval` instead of `lm_eval`.

> [!Note]
> Just like you can provide a local path to `transformers.AutoModel`, you can also provide a local path to `lm_eval` via `--model_args pretrained=/path/to/model`

#### Multi-GPU Evaluation with Hugging Face `accelerate`

We support two main ways of using Hugging Face's [accelerate 🚀](https://github.com/huggingface/accelerate) library for multi-GPU evaluation.

To perform *data-parallel evaluation* (where each GPU loads a **separate full copy** of the model), we leverage the `accelerate` launcher as follows:

```
accelerate launch -m lm_eval --model hf \
    --tasks lambada_openai,arc_easy \
    --batch_size 16
```
(or via `accelerate launch --no-python lm_eval`).

For cases where your model can fit on a single GPU, this allows you to evaluate on K GPUs K times faster than on one.

**WARNING**: This setup does not work with FSDP model sharding, so in `accelerate config` FSDP must be disabled, or the NO_SHARD FSDP option must be used.

The second way of using `accelerate` for multi-GPU evaluation is when your model is *too large to fit on a single GPU.*

In this setting, run the library *outside of the `accelerate` launcher*, but pass `parallelize=True` to `--model_args` as follows:

```
lm_eval --model hf \
    --tasks lambada_openai,arc_easy \
    --model_args parallelize=True \
    --batch_size 16
```

This means that your model's weights will be split across all available GPUs.

For more advanced users or even larger models, we allow for the following arguments when `parallelize=True` as well:
- `device_map_option`: How to split model weights across available GPUs. Defaults to "auto".
- `max_memory_per_gpu`: the max GPU memory to use per GPU in loading the model.
- `max_cpu_memory`: the max amount of CPU memory to use when offloading the model weights to RAM.
- `offload_folder`: a folder where model weights will be offloaded to disk if needed.
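
As a sketch only (the memory limits and offload path below are illustrative placeholders, not recommendations), these options are passed through `--model_args` alongside `parallelize=True`:

```bash
# hypothetical values: adjust per-GPU/CPU memory caps and the offload directory for your setup
lm_eval --model hf \
    --model_args pretrained=EleutherAI/gpt-j-6B,parallelize=True,device_map_option=auto,max_memory_per_gpu=38GiB,max_cpu_memory=64GiB,offload_folder=./offload \
    --tasks lambada_openai,arc_easy \
    --batch_size 16
```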

These two options (`accelerate launch` and `parallelize=True`) are mutually exclusive.

**Note: we do not currently support multi-node evaluations natively, and advise using either an externally hosted server to run inference requests against, or creating a custom integration with your distributed framework [as is done for the GPT-NeoX library](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py).**

### NVIDIA `nemo` models

[NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo) is a generative AI framework built for researchers and pytorch developers working on language models.

To evaluate a `nemo` model, start by installing NeMo following [the documentation](https://github.com/NVIDIA/NeMo?tab=readme-ov-file#installation). We highly recommend using the NVIDIA PyTorch or NeMo container, especially if you have issues installing Apex or any other dependencies (see [latest released containers](https://github.com/NVIDIA/NeMo/releases)). Please also install the lm evaluation harness library following the instructions in [the Install section](https://github.com/EleutherAI/lm-evaluation-harness/tree/main?tab=readme-ov-file#install).

NeMo models can be obtained through the [NVIDIA NGC Catalog](https://catalog.ngc.nvidia.com/models) or on [NVIDIA's Hugging Face page](https://huggingface.co/nvidia). The [NVIDIA NeMo Framework](https://github.com/NVIDIA/NeMo/tree/main/scripts/nlp_language_modeling) provides conversion scripts to convert the `hf` checkpoints of popular models like llama, falcon, mixtral or mpt to `nemo`.

Run a `nemo` model on one GPU:
```bash
lm_eval --model nemo_lm \
    --model_args path=<path_to_nemo_model> \
    --tasks hellaswag \
    --batch_size 32
```

It is recommended to unpack the `nemo` model beforehand to avoid unpacking it inside the docker container, which may overflow disk space. For that, you can run:

```
mkdir MY_MODEL
tar -xvf MY_MODEL.nemo -C MY_MODEL
```

#### Multi-GPU evaluation with NVIDIA `nemo` models

By default, only one GPU is used. But we do support either data replication or tensor/pipeline parallelism during evaluation, on one node.

1) To enable data replication, set the `model_args` of `devices` to the number of data replicas to run. For example, the command to run 8 data replicas over 8 GPUs is:
```bash
torchrun --nproc-per-node=8 --no-python lm_eval \
    --model nemo_lm \
    --model_args path=<path_to_nemo_model>,devices=8 \
    --tasks hellaswag \
    --batch_size 32
```

2) To enable tensor and/or pipeline parallelism, set the `model_args` of `tensor_model_parallel_size` and/or `pipeline_model_parallel_size`. In addition, you also have to set `devices` to be equal to the product of `tensor_model_parallel_size` and `pipeline_model_parallel_size`. For example, the command to use one node of 4 GPUs with tensor parallelism of 2 and pipeline parallelism of 2 is:
```bash
torchrun --nproc-per-node=4 --no-python lm_eval \
    --model nemo_lm \
    --model_args path=<path_to_nemo_model>,devices=4,tensor_model_parallel_size=2,pipeline_model_parallel_size=2 \
    --tasks hellaswag \
    --batch_size 32
```
Note that it is recommended to substitute the `python` command with `torchrun --nproc-per-node=<number of devices> --no-python` to facilitate loading the model into the GPUs. This is especially important for large checkpoints loaded into multiple GPUs.

Not supported yet: multi-node evaluation and combinations of data replication with tensor or pipeline parallelism.

### Tensor + Data Parallel and Optimized Inference with `vLLM`

We also support vLLM for faster inference on [supported model types](https://docs.vllm.ai/en/latest/models/supported_models.html), which is especially fast when splitting a model across multiple GPUs. For single-GPU or multi-GPU inference (tensor parallel, data parallel, or a combination of both), for example:

```bash
lm_eval --model vllm \
    --model_args pretrained={model_name},tensor_parallel_size={GPUs_per_model},dtype=auto,gpu_memory_utilization=0.8,data_parallel_size={model_replicas} \
    --tasks lambada_openai \
    --batch_size auto
```
To use vllm, do `pip install lm_eval[vllm]`. For a full list of supported vLLM configurations, please reference our [vLLM integration](https://github.com/EleutherAI/lm-evaluation-harness/blob/e74ec966556253fbe3d8ecba9de675c77c075bce/lm_eval/models/vllm_causallms.py) and the vLLM documentation.

vLLM occasionally differs in output from Huggingface. We treat Huggingface as the reference implementation, and provide a [script](./scripts/model_comparator.py) for checking the validity of vllm results against HF.

> [!Tip]
> For fastest performance, we recommend using `--batch_size auto` for vLLM whenever possible, to leverage its continuous batching functionality!

> [!Tip]
> Passing `max_model_len=4096` or some other reasonable default to vLLM through model args may cause speedups or prevent out-of-memory errors when trying to use auto batch size, such as for Mistral-7B-v0.1 which defaults to a maximum length of 32k.

### Model APIs and Inference Servers

Our library also supports the evaluation of models served via several commercial APIs, and we hope to implement support for the most commonly used performant local/self-hosted inference servers.

To call a hosted model, use:

```bash
export OPENAI_API_KEY=YOUR_KEY_HERE
lm_eval --model openai-completions \
    --model_args model=davinci \
    --tasks lambada_openai,hellaswag
```

We also support using your own local inference server with servers that mirror the OpenAI Completions and ChatCompletions APIs.

```bash
lm_eval --model local-chat-completions --tasks gsm8k --model_args model=facebook/opt-125m,base_url=http://{yourip}:8000/v1
```
Note that for externally hosted models, configs such as `--device` and `--batch_size` should not be used and do not function. Just like you can use `--model_args` to pass arbitrary arguments to the model constructor for local models, you can use it to pass arbitrary arguments to the model API for hosted models. See the documentation of the hosting service for information on what arguments they support.

| API or Inference Server | Implemented? | `--model <xxx>` name | Models supported: | Request Types: |
|---|---|---|---|---|
| OpenAI Completions | :heavy_check_mark: | `openai-completions`, `local-completions` | All OpenAI Completions API models | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| OpenAI ChatCompletions | :heavy_check_mark: | `openai-chat-completions`, `local-chat-completions` | [All ChatCompletions API models](https://platform.openai.com/docs/guides/gpt) | `generate_until` (no logprobs) |
| Anthropic | :heavy_check_mark: | `anthropic` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/reference/selecting-a-model) | `generate_until` (no logprobs) |
| Anthropic Chat | :heavy_check_mark: | `anthropic-chat`, `anthropic-chat-completions` | [Supported Anthropic Engines](https://docs.anthropic.com/claude/docs/models-overview) | `generate_until` (no logprobs) |
| Textsynth | :heavy_check_mark: | `textsynth` | [All supported engines](https://textsynth.com/documentation.html#engines) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| Cohere | [:hourglass: - blocked on Cohere API bug](https://github.com/EleutherAI/lm-evaluation-harness/pull/395) | N/A | [All `cohere.generate()` engines](https://docs.cohere.com/docs/models) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| [Llama.cpp](https://github.com/ggerganov/llama.cpp) (via [llama-cpp-python](https://github.com/abetlen/llama-cpp-python)) | :heavy_check_mark: | `gguf`, `ggml` | [All models supported by llama.cpp](https://github.com/ggerganov/llama.cpp) | `generate_until`, `loglikelihood` (perplexity evaluation not yet implemented) |
| vLLM | :heavy_check_mark: | `vllm` | [Most HF Causal Language Models](https://docs.vllm.ai/en/latest/models/supported_models.html) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| Mamba | :heavy_check_mark: | `mamba_ssm` | [Mamba architecture Language Models via the `mamba_ssm` package](https://huggingface.co/state-spaces) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| Huggingface Optimum (Causal LMs) | ✔️ | `openvino` | Any decoder-only AutoModelForCausalLM converted with Huggingface Optimum into OpenVINO™ Intermediate Representation (IR) format | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| Neuron via AWS Inf2 (Causal LMs) | ✔️ | `neuronx` | Any decoder-only AutoModelForCausalLM supported to run on the [huggingface-ami image for inferentia2](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2) | `generate_until`, `loglikelihood`, `loglikelihood_rolling` |
| Your local inference server! | :heavy_check_mark: | `local-completions` or `local-chat-completions` (using `openai-chat-completions` model type) | Any server address that accepts GET requests using HF models and mirrors OpenAI's Completions or ChatCompletions interface | `generate_until` |

Models which do not supply logits or logprobs can be used with tasks of type `generate_until` only, while local models, or APIs that supply logprobs/logits of their prompts, can be run on all task types: `generate_until`, `loglikelihood`, `loglikelihood_rolling`, and `multiple_choice`.

For more information on the different task `output_types` and model request types, see [our documentation](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/model_guide.md#interface).

> [!Note]
> For best performance with closed chat model APIs such as Anthropic Claude 3 and GPT-4, we recommend carefully looking at a few sample outputs using `--limit 10` first to confirm answer extraction and scoring on generative tasks is performing as expected. Providing `system="<some system prompt here>"` within `--model_args` for anthropic-chat-completions, to instruct the model what format to respond in, may be useful.

### Other Frameworks

A number of other libraries contain scripts for calling the eval harness through their library. These include [GPT-NeoX](https://github.com/EleutherAI/gpt-neox/blob/main/eval_tasks/eval_adapter.py), [Megatron-DeepSpeed](https://github.com/microsoft/Megatron-DeepSpeed/blob/main/examples/MoE/readme_evalharness.md), and [mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/eval_harness.py).

To create your own custom integration you can follow instructions from [this tutorial](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md#external-library-usage).
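
As a rough sketch of external-library usage (keyword arguments follow the `simple_evaluate` signature in `lm_eval/evaluator.py` included in this upload; the exact structure of the returned dictionary may differ between versions):

```python
import lm_eval

# Evaluate a small HF model on one task; keyword arguments mirror the CLI flags.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=EleutherAI/pythia-160m",
    tasks=["lambada_openai"],
    num_fewshot=0,
    batch_size=8,
    device="cuda:0",
)

# simple_evaluate returns a results dictionary on rank 0 and None on other ranks.
if results is not None:
    # per-task metrics; the "results" key is assumed from the standard output format
    print(results["results"])
```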

### Additional Features
> [!Note]
> For tasks unsuitable for direct evaluation (either due to risks associated with executing untrusted code or complexities in the evaluation process), the `--predict_only` flag is available to obtain decoded generations for post-hoc evaluation.

If you have a Metal compatible Mac, you can run the eval harness using the MPS back-end by replacing `--device cuda:0` with `--device mps` (requires PyTorch version 2.1 or higher).
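
For example, the earlier `hellaswag` command becomes (unchanged except for the device flag):

```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/gpt-j-6B \
    --tasks hellaswag \
    --device mps \
    --batch_size 8
```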

> [!Note]
> You can inspect what the LM inputs look like by running the following command:
> ```bash
> python write_out.py \
>     --tasks <task1,task2,...> \
>     --num_fewshot 5 \
>     --num_examples 10 \
>     --output_base_path /path/to/output/folder
> ```
> This will write out one text file for each task.

To verify the data integrity of the tasks you're performing in addition to running the tasks themselves, you can use the `--check_integrity` flag:

```bash
lm_eval --model openai \
    --model_args engine=davinci \
    --tasks lambada_openai,hellaswag \
    --check_integrity
```

## Advanced Usage Tips

For models loaded with the HuggingFace `transformers` library, any arguments provided via `--model_args` get passed to the relevant constructor directly. This means that anything you can do with `AutoModel` can be done with our library. For example, you can pass a local path via `pretrained=` or use models finetuned with [PEFT](https://github.com/huggingface/peft) by taking the call you would run to evaluate the base model and adding `,peft=PATH` to the `model_args` argument:
```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/gpt-j-6b,parallelize=True,load_in_4bit=True,peft=nomic-ai/gpt4all-j-lora \
    --tasks openbookqa,arc_easy,winogrande,hellaswag,arc_challenge,piqa,boolq \
    --device cuda:0
```

[GPTQ](https://github.com/PanQiWei/AutoGPTQ) quantized models can be loaded by specifying their file names in `,autogptq=NAME` (or `,autogptq=True` for default names) in the `model_args` argument:

```bash
lm_eval --model hf \
    --model_args pretrained=model-name-or-path,autogptq=model.safetensors,gptq_use_triton=True \
    --tasks hellaswag
```

We support wildcards in task names, for example you can run all of the machine-translated lambada tasks via `--task lambada_openai_mt_*`.

To save evaluation results, provide an `--output_path`. We also support logging model responses with the `--log_samples` flag for post-hoc analysis.

Additionally, one can provide a directory with `--use_cache` to cache the results of prior runs. This allows you to avoid repeated execution of the same (model, task) pairs for re-scoring.
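
Putting these flags together, a run that writes results to disk, logs per-sample responses, and caches model calls for later re-scoring might look like this (the output and cache paths are illustrative placeholders):

```bash
lm_eval --model hf \
    --model_args pretrained=EleutherAI/pythia-160m \
    --tasks lambada_openai,hellaswag \
    --batch_size 8 \
    --output_path output/pythia-160m \
    --log_samples \
    --use_cache cache/pythia-160m
```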

For a full list of supported arguments, check out the [interface](https://github.com/EleutherAI/lm-evaluation-harness/blob/main/docs/interface.md) guide in our documentation!

## Visualizing Results

You can seamlessly visualize and analyze the results of your evaluation harness runs using both Weights & Biases (W&B) and Zeno.

### Zeno

You can use [Zeno](https://zenoml.com) to visualize the results of your eval harness runs.

First, head to [hub.zenoml.com](https://hub.zenoml.com) to create an account and get an API key [on your account page](https://hub.zenoml.com/account).
Add this key as an environment variable:

```bash
export ZENO_API_KEY=[your api key]
```

You'll also need to install the `lm_eval[zeno]` package extra.

To visualize the results, run the eval harness with the `log_samples` and `output_path` flags.
We expect `output_path` to contain multiple folders that represent individual model names.
You can thus run your evaluation on any number of tasks and models and upload all of the results as projects on Zeno.

```bash
lm_eval \
    --model hf \
    --model_args pretrained=EleutherAI/gpt-j-6B \
    --tasks hellaswag \
    --device cuda:0 \
    --batch_size 8 \
    --log_samples \
    --output_path output/gpt-j-6B
```

Then, you can upload the resulting data using the `zeno_visualize` script:

```bash
python scripts/zeno_visualize.py \
    --data_path output \
    --project_name "Eleuther Project"
```

This will use all subfolders in `data_path` as different models and upload all tasks within these model folders to Zeno.
If you run the eval harness on multiple tasks, the `project_name` will be used as a prefix and one project will be created per task.

You can find an example of this workflow in [examples/visualize-zeno.ipynb](examples/visualize-zeno.ipynb).

### Weights and Biases

With the [Weights and Biases](https://wandb.ai/site) integration, you can now spend more time extracting deeper insights into your evaluation results. The integration is designed to streamline the process of logging and visualizing experiment results using the Weights & Biases (W&B) platform.

The integration provides functionality to:

- automatically log the evaluation results,
- log the samples as W&B Tables for easy visualization,
- log the `results.json` file as an artifact for version control,
- log the `<task_name>_eval_samples.json` file if the samples are logged,
- generate a comprehensive report for analysis and visualization with all the important metrics,
- log task and cli specific configs,
- and more out of the box, like the command used to run the evaluation, GPU/CPU counts, timestamp, etc.

First you'll need to install the `lm_eval[wandb]` package extra. Do `pip install lm_eval[wandb]`.

Authenticate your machine with your unique W&B token. Visit https://wandb.ai/authorize to get one. Then do `wandb login` in your command line terminal.

Run the eval harness as usual with a `wandb_args` flag. Use this flag to provide arguments for initializing a wandb run ([wandb.init](https://docs.wandb.ai/ref/python/init)) as comma-separated string arguments.

```bash
lm_eval \
    --model hf \
    --model_args pretrained=microsoft/phi-2,trust_remote_code=True \
    --tasks hellaswag,mmlu_abstract_algebra \
    --device cuda:0 \
    --batch_size 8 \
    --output_path output/phi-2 \
    --limit 10 \
    --wandb_args project=lm-eval-harness-integration \
    --log_samples
```

In the stdout, you will find a link to the W&B run page as well as a link to the generated report. You can find an example of this workflow in [examples/visualize-wandb.ipynb](examples/visualize-wandb.ipynb), and an example of how to integrate it beyond the CLI.

## How to Contribute or Learn More?

For more information on the library and how everything fits together, check out all of our [documentation pages](https://github.com/EleutherAI/lm-evaluation-harness/tree/main/docs)! We plan to post a larger roadmap of desired + planned library improvements soon, with more information on how contributors can help.

### Implementing new tasks

To implement a new task in the eval harness, see [this guide](./docs/new_task_guide.md).

In general, we follow this priority list for addressing concerns about prompting and other eval details:
1. If there is widespread agreement among people who train LLMs, use the agreed upon procedure.
2. If there is a clear and unambiguous official implementation, use that procedure.
3. If there is widespread agreement among people who evaluate LLMs, use the agreed upon procedure.
4. If there are multiple common implementations but not universal or widespread agreement, use our preferred option among the common implementations. As before, prioritize choosing from among the implementations found in LLM training papers.

These are guidelines and not rules, and can be overruled in special circumstances.

We try to prioritize agreement with the procedures used by other groups to decrease the harm when people inevitably compare runs across different papers despite our discouragement of the practice. Historically, we also prioritized the implementation from [Language Models are Few Shot Learners](https://arxiv.org/abs/2005.14165) as our original goal was specifically to compare results with that paper.

### Support

The best way to get support is to open an issue on this repo or join the [EleutherAI Discord server](https://discord.gg/eleutherai). The `#lm-thunderdome` channel is dedicated to developing this project and the `#release-discussion` channel is for receiving support for our releases. If you've used the library and have had a positive (or negative) experience, we'd love to hear from you!

## Optional Extras
Extras dependencies can be installed via `pip install -e ".[NAME]"`

| Name | Use |
|---------------|---------------------------------------|
| anthropic | For using Anthropic's models |
| dev | For linting PRs and contributions |
| gptq | For loading models with GPTQ |
| hf_transfer | For speeding up HF Hub file downloads |
| ifeval | For running the IFEval task |
| neuronx | For running on AWS inf2 instances |
| mamba | For loading Mamba SSM models |
| math | For running math task answer checking |
| multilingual | For multilingual tokenizers |
| openai | For using OpenAI's models |
| optimum | For running Intel OpenVINO models |
| promptsource | For using PromptSource prompts |
| sentencepiece | For using the sentencepiece tokenizer |
| testing | For running library test suite |
| vllm | For loading models with vLLM |
| zeno | For visualizing results with Zeno |
| all | Loads all extras (not recommended) |

## Cite as

```
@misc{eval-harness,
  author       = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy},
  title        = {A framework for few-shot language model evaluation},
  month        = 12,
  year         = 2023,
  publisher    = {Zenodo},
  version      = {v0.4.0},
  doi          = {10.5281/zenodo.10256836},
  url          = {https://zenodo.org/records/10256836}
}
```
lm-evaluation/eval.sh
ADDED
@@ -0,0 +1,5 @@
lm_eval --model hf \
    --model_args pretrained=EleutherAI/pythia-160m \
    --tasks indicwikibio-as \
    --device cuda:0 \
    --batch_size 32
lm-evaluation/ignore.txt
ADDED
@@ -0,0 +1,8 @@
ROUGE
rouge
nin
maka
mor
te
ond
extraversion
lm-evaluation/lm_eval/__init__.py
ADDED
@@ -0,0 +1,4 @@
from .evaluator import evaluate, simple_evaluate

# import habana_frameworks.torch.gpu_migration
# import habana_frameworks.torch.core as htcore
lm-evaluation/lm_eval/evaluator.py
ADDED
@@ -0,0 +1,583 @@
import itertools
import logging
import random
import time
from collections import defaultdict
from typing import TYPE_CHECKING, List, Optional, Union

import numpy as np
import torch

import lm_eval.api.metrics
import lm_eval.api.registry
import lm_eval.models
from lm_eval.caching.cache import delete_cache
from lm_eval.evaluator_utils import (
    consolidate_results,
    get_sample_size,
    get_task_list,
    prepare_print_tasks,
    print_writeout,
    run_task_tests,
)
from lm_eval.logging_utils import add_env_info, get_git_commit_hash
from lm_eval.tasks import TaskManager, get_task_dict
from lm_eval.utils import eval_logger, positional_deprecated, simple_parse_args_string


if TYPE_CHECKING:
    from lm_eval.api.model import LM
    from lm_eval.tasks import Task


@positional_deprecated
def simple_evaluate(
    model,
    model_args: Optional[Union[str, dict]] = None,
    tasks: Optional[List[Union[str, dict, object]]] = None,
    num_fewshot: Optional[int] = None,
    batch_size: Optional[int] = None,
    max_batch_size: Optional[int] = None,
    device: Optional[str] = None,
    use_cache: Optional[str] = None,
    cache_requests: bool = False,
    rewrite_requests_cache: bool = False,
    delete_requests_cache: bool = False,
    limit: Optional[Union[int, float]] = None,
    bootstrap_iters: int = 100000,
    check_integrity: bool = False,
    write_out: bool = False,
    log_samples: bool = True,
    gen_kwargs: Optional[str] = None,
    task_manager: Optional[TaskManager] = None,
    verbosity: str = "INFO",
    predict_only: bool = False,
    random_seed: int = 0,
    numpy_random_seed: int = 1234,
    torch_random_seed: int = 1234,
):
    """Instantiate and evaluate a model on a list of tasks.

    :param model: Union[str, LM]
        Name of model or LM object, see lm_eval.models.get_model
    :param model_args: Optional[str, dict]
        String or dict arguments for each model class, see LM.create_from_arg_string and LM.create_from_arg_object.
        Ignored if `model` argument is a LM object.
    :param tasks: list[Union[str, dict, Task]]
        List of task names or Task objects. Task objects will be taken to have name task.EVAL_HARNESS_NAME if defined and type(task).__name__ otherwise.
    :param num_fewshot: int
        Number of examples in few-shot context
    :param batch_size: int or str, optional
        Batch size for model
    :param max_batch_size: int, optional
        Maximal batch size to try with automatic batch size detection
    :param device: str, optional
        PyTorch device (e.g. "cpu" or "cuda:0") for running models
    :param use_cache: str, optional
        A path to a sqlite db file for caching model responses. `None` if not caching.
    :param cache_requests: bool, optional
        Speed up evaluation by caching the building of dataset requests. `None` if not caching.
    :param rewrite_requests_cache: bool, optional
        Rewrites all of the request cache if set to `True`. `None` if not desired.
    :param delete_requests_cache: bool, optional
        Deletes all of the request cache if set to `True`. `None` if not desired.
    :param limit: int or float, optional
        Limit the number of examples per task (only use this for testing). If <1, limit is a percentage of the total number of examples.
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param check_integrity: bool
        Whether to run the relevant part of the test suite for the tasks
    :param write_out: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :param gen_kwargs: str
        String arguments for model generation
        Ignored for all tasks with loglikelihood output_type
    :param predict_only: bool
        If true only model outputs will be generated and returned. Metrics will not be evaluated
    :param random_seed: int
        Random seed for python's random module. If set to None, the seed will not be set.
    :param numpy_random_seed: int
        Random seed for numpy. If set to None, the seed will not be set.
    :param torch_random_seed: int
        Random seed for torch. If set to None, the seed will not be set.

    :return
        Dictionary of results
    """
    eval_logger.setLevel(getattr(logging, f"{verbosity}"))
    start_date = time.time()

    if delete_requests_cache:
        eval_logger.info("Deleting requests cache...")
        delete_cache()

    seed_message = []
    if random_seed is not None:
        # See https://github.com/EleutherAI/lm-evaluation-harness/pull/1412
        seed_message.append(f"Setting random seed to {random_seed}")
        random.seed(random_seed)

    if numpy_random_seed is not None:
        seed_message.append(f"Setting numpy seed to {numpy_random_seed}")
        np.random.seed(numpy_random_seed)

    if torch_random_seed is not None:
        seed_message.append(f"Setting torch manual seed to {torch_random_seed}")
        torch.manual_seed(torch_random_seed)

    if seed_message:
        eval_logger.info(" | ".join(seed_message))

    if tasks is None:
        tasks = []
    if len(tasks) == 0:
        raise ValueError(
            "No tasks specified, or no tasks found. Please verify the task names."
        )

    if gen_kwargs is not None:
        gen_kwargs = simple_parse_args_string(gen_kwargs)
        eval_logger.warning(
            "generation_kwargs specified through cli, these settings will update set parameters in yaml tasks. "
            "Ensure 'do_sample=True' for non-greedy decoding!"
        )
        if gen_kwargs == "":
            gen_kwargs = None

    if isinstance(model, str):
        if model_args is None:
            eval_logger.warning("model_args not specified. Using defaults.")
            model_args = ""
        if "pretrained" not in model_args and model in [
            "hf-auto",
            "hf",
            "huggingface",
            "vllm",
        ]:
            eval_logger.warning(
                "pretrained not specified. Using default pretrained=gpt2."
            )

        if isinstance(model_args, dict):
            eval_logger.info(
                f"Initializing {model} model, with arguments: {model_args}"
            )
            lm = lm_eval.api.registry.get_model(model).create_from_arg_obj(
                model_args,
                {
                    "batch_size": batch_size,
                    "max_batch_size": max_batch_size,
                    "device": device,
                },
            )

        else:
            eval_logger.info(
                f"Initializing {model} model, with arguments: {simple_parse_args_string(model_args)}"
            )
            lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
                model_args,
                {
                    "batch_size": batch_size,
                    "max_batch_size": max_batch_size,
                    "device": device,
                },
            )
    else:
        if not isinstance(model, lm_eval.api.model.LM):
            raise TypeError
        eval_logger.info("Using pre-initialized model")
        lm = model

    if use_cache is not None:
        eval_logger.info(f"Using cache at {use_cache + '_rank' + str(lm.rank) + '.db'}")
        lm = lm_eval.api.model.CachingLM(
            lm,
            use_cache
            # each rank receives a different cache db.
            # necessary to avoid multiple writes to cache at once
            + "_rank"
            + str(lm.rank)
            + ".db",
        )

    if task_manager is None:
        task_manager = TaskManager(verbosity)

    task_dict = get_task_dict(tasks, task_manager)
    for task_name in task_dict.keys():
        task_obj = task_dict[task_name]
        if isinstance(task_obj, tuple):
            _, task_obj = task_obj
            if task_obj is None:
                continue

        if task_obj.get_config("output_type") == "generate_until":
            if gen_kwargs is not None:
                task_obj.set_config(
                    key="generation_kwargs", value=gen_kwargs, update=True
                )

        if predict_only:
            log_samples = True
            eval_logger.info(
                f"Processing {task_name} in output-only mode. Metrics will not be calculated!"
            )
            # we have to change the class properties post-hoc. This is pretty hacky.
            task_obj.override_metric(metric_name="bypass")

        # override tasks' fewshot values to the provided num_fewshot arg value
        # except if tasks have it set to 0 manually in their configs--then we should never overwrite that
        if num_fewshot is not None:
            if (default_num_fewshot := task_obj.get_config("num_fewshot")) == 0:
                eval_logger.info(
                    f"num_fewshot has been set to 0 for {task_name} in its config. Manual configuration will be ignored."
                )
            else:
                eval_logger.warning(
                    f"Overwriting default num_fewshot of {task_name} from {default_num_fewshot} to {num_fewshot}"
                )
                task_obj.set_config(key="num_fewshot", value=num_fewshot)
        else:
            # if num_fewshot not provided, and the task does not define a default one, default to 0
            if (default_num_fewshot := task_obj.get_config("num_fewshot")) is None:
                task_obj.set_config(key="num_fewshot", value=0)

    if check_integrity:
        run_task_tests(task_list=tasks)

    results = evaluate(
        lm=lm,
        task_dict=task_dict,
        limit=limit,
        cache_requests=cache_requests,
        rewrite_requests_cache=rewrite_requests_cache,
        bootstrap_iters=bootstrap_iters,
        write_out=write_out,
        log_samples=log_samples,
        verbosity=verbosity,
    )

    if lm.rank == 0:
        if isinstance(model, str):
            model_name = model
        elif hasattr(model, "config") and hasattr(model.config, "_name_or_path"):
            model_name = model.config._name_or_path
        else:
            model_name = type(model).__name__

        # add info about the model and few shot config
        results["config"] = {
            "model": model_name,
            "model_args": model_args,
            "batch_size": batch_size,
            "batch_sizes": (
                list(lm.batch_sizes.values()) if hasattr(lm, "batch_sizes") else []
            ),
            "device": device,
            "use_cache": use_cache,
            "limit": limit,
            "bootstrap_iters": bootstrap_iters,
            "gen_kwargs": gen_kwargs,
        }
        results["git_hash"] = get_git_commit_hash()
        results["date"] = start_date
        add_env_info(results)  # additional environment info to results
        return results
    else:
        return None


@positional_deprecated
def evaluate(
    lm: "LM",
    task_dict,
    limit: Optional[int] = None,
    cache_requests: bool = False,
    rewrite_requests_cache: bool = False,
    bootstrap_iters: Optional[int] = 100000,
    write_out: bool = False,
    log_samples: bool = True,
    verbosity: str = "INFO",
):
    """Instantiate and evaluate a model on a list of tasks.

    :param lm: obj
        Language Model
    :param task_dict: dict[str, Task]
        Dictionary of tasks. Tasks will be taken to have name type(task).config.task .
    :param limit: int, optional
        Limit the number of examples per task (only use this for testing)
    :param bootstrap_iters:
        Number of iterations for bootstrap statistics
    :param write_out: bool
        If True, write out an example document and model input for checking task integrity
    :param log_samples: bool
        If True, write out all model outputs and documents for per-sample measurement and post-hoc analysis
    :return
        Dictionary of results
    """

    eval_logger.setLevel(getattr(logging, f"{verbosity}"))

    # tracks all Instances/requests a model must generate output on.
    requests = defaultdict(list)
    # stores the amount to pad out reqs per req. type so that
    # number of fwd passes per distributed rank is equal
    padding_requests = defaultdict(int)

    # get lists of group hierarchy and each type of request
    task_hierarchy, eval_tasks = get_task_list(task_dict)
    if not log_samples:
        if not all(
            "bypass" not in getattr(task_output.task, "_metric_fn_list", {}).keys()
            for task_output in eval_tasks
        ):
            raise ValueError("log_samples must be True for 'bypass' metric-only tasks")
    for task_output in eval_tasks:
        task: Task = task_output.task
        limit = get_sample_size(task, limit)
        task.build_all_requests(
            limit=limit,
            rank=lm.rank,
            world_size=lm.world_size,
            cache_requests=cache_requests,
            rewrite_requests_cache=rewrite_requests_cache,
        )
        eval_logger.debug(
            f"Task: {task_output.task_name}; number of requests on this rank: {len(task.instances)}"
        )

        if write_out:
            print_writeout(task)
        # aggregate Instances by LM method requested to get output.
        for instance in task.instances:
            reqtype = instance.request_type
            requests[reqtype].append(instance)

        if lm.world_size > 1:
            instances_rnk = torch.tensor(len(task._instances), device=lm.device)
            gathered_item = (
                lm.accelerator.gather(instances_rnk).cpu().detach().numpy().tolist()
            )
            # "multiple_choice" task types dispatch (several) "loglikelihood" request types
            reqtype = (
                "loglikelihood"
                if task.OUTPUT_TYPE == "multiple_choice"
                else task.OUTPUT_TYPE
            )
            # compute number of pseudo-batches to pad with (FSDP/DDP require even batches among ranks)
            numpad = max(gathered_item) - gathered_item[lm.rank]
            # todo: may not account for padding in cases like SquadV2 which has multiple req types
            padding_requests[reqtype] += numpad

    ### Run LM on inputs, get all outputs ###
    # execute each type of request
    for reqtype, reqs in requests.items():
        eval_logger.info(f"Running {reqtype} requests")
        # create `K` copies of each request `req` based off `K = req.repeats`
        cloned_reqs = []
        for req in reqs:
            cloned_reqs.extend([req] * req.repeats)

        if (lm.world_size > 1) and (padding_requests[reqtype] > 0):
            for _ in range(padding_requests[reqtype]):
                cloned_reqs.extend([req] * req.repeats)

        # run requests through model
        resps = getattr(lm, reqtype)(cloned_reqs)

        # put responses from model into a list of length K for each request.
        for x, req in zip(resps, cloned_reqs):
            req.resps.append(x)

        if lm.world_size > 1:
            lm.accelerator.wait_for_everyone()

    RANK = lm.rank
    WORLD_SIZE = lm.world_size
    ### Postprocess outputs ###
    # TODO: del model here, maybe (idea: allow user to specify device of e.g. reward model separately)
    for task_output in eval_tasks:
        task = task_output.task
        task.apply_filters()

        ### Collect values of metrics on all datapoints ###
        # # unpack results and sort back in order and return control to Task
        # TODO: make it possible to use a different metric per filter
        # Pre-process task.instances to group by doc_id
        instances_by_doc_id = defaultdict(list)
        for instance in task.instances:
            instances_by_doc_id[instance.doc_id].append(instance)
        # Sort instances within each group
        for instances in instances_by_doc_id.values():
            instances.sort(key=lambda x: x.idx)
        # iterate over different filters used
        for filter_key in task.instances[0].filtered_resps.keys():
            doc_iterator = task.doc_iterator(
                rank=RANK, limit=limit, world_size=WORLD_SIZE
            )
            for doc_id, doc in doc_iterator:
                requests = instances_by_doc_id[doc_id]
                metrics = task.process_results(
                    doc, [req.filtered_resps[filter_key] for req in requests]
|
426 |
+
)
|
427 |
+
if log_samples:
|
428 |
+
target = task.doc_to_target(doc)
|
429 |
+
example = {
|
430 |
+
"doc_id": doc_id,
|
431 |
+
"doc": doc,
|
432 |
+
"target": target,
|
433 |
+
"arguments": [req.args for req in requests],
|
434 |
+
"resps": [req.resps for req in requests],
|
435 |
+
"filtered_resps": [
|
436 |
+
req.filtered_resps[filter_key] for req in requests
|
437 |
+
],
|
438 |
+
}
|
439 |
+
example.update(metrics)
|
440 |
+
task_output.logged_samples.append(example)
|
441 |
+
for metric, value in metrics.items():
|
442 |
+
task_output.sample_metrics[(metric, filter_key)].append(value)
|
443 |
+
|
444 |
+
if WORLD_SIZE > 1:
|
445 |
+
# if multigpu, then gather data across all ranks to rank 0
|
446 |
+
# first gather logged samples across all ranks
|
447 |
+
for task_output in eval_tasks:
|
448 |
+
if log_samples:
|
449 |
+
# for task_name, task_samples in list(samples.items()):
|
450 |
+
full_samples = [None] * WORLD_SIZE
|
451 |
+
torch.distributed.all_gather_object(
|
452 |
+
obj=task_output.logged_samples,
|
453 |
+
object_list=full_samples,
|
454 |
+
)
|
455 |
+
|
456 |
+
if RANK == 0:
|
457 |
+
task_output.logged_samples = list(
|
458 |
+
itertools.chain.from_iterable(full_samples)
|
459 |
+
)
|
460 |
+
|
461 |
+
# then collect metrics across all ranks
|
462 |
+
for metrics in task_output.sample_metrics:
|
463 |
+
metric_list = [None] * WORLD_SIZE
|
464 |
+
torch.distributed.all_gather_object(
|
465 |
+
obj=task_output.sample_metrics[metrics],
|
466 |
+
object_list=metric_list,
|
467 |
+
)
|
468 |
+
if RANK == 0:
|
469 |
+
task_output.sample_metrics[metrics] = list(
|
470 |
+
itertools.chain.from_iterable(metric_list)
|
471 |
+
)
|
472 |
+
|
473 |
+
if RANK == 0:
|
474 |
+
### Aggregate results over all datapoints ###
|
475 |
+
# aggregate results ; run bootstrap CIs
|
476 |
+
for task_output in eval_tasks:
|
477 |
+
task_output.calculate_aggregate_metric(bootstrap_iters=bootstrap_iters)
|
478 |
+
results, samples, configs, versions, num_fewshot = consolidate_results(
|
479 |
+
eval_tasks
|
480 |
+
)
|
481 |
+
|
482 |
+
### Calculate group metrics ###
|
483 |
+
if bool(results):
|
484 |
+
for group, task_list in reversed(task_hierarchy.items()):
|
485 |
+
if len(task_list) == 0:
|
486 |
+
# task_hierarchy entries are either
|
487 |
+
# `group_name: [subtask1, subtask2, ...]`
|
488 |
+
# or `task_name: []`.
|
489 |
+
# we only want to operate on groups here.
|
490 |
+
continue
|
491 |
+
metric_list = list(
|
492 |
+
{
|
493 |
+
key
|
494 |
+
for task in task_list
|
495 |
+
for key in results[task].keys()
|
496 |
+
if "_stderr" not in key and key not in ["alias", "samples"]
|
497 |
+
}
|
498 |
+
)
|
499 |
+
for metric in metric_list:
|
500 |
+
stderr = "_stderr,".join(metric.split(","))
|
501 |
+
|
502 |
+
# gather metrics, sizes, and stderrs from subtasks
|
503 |
+
metrics = [
|
504 |
+
results[task][metric]
|
505 |
+
for task in task_list
|
506 |
+
if metric in results[task]
|
507 |
+
] # TODO: copy?
|
508 |
+
stderrs = [
|
509 |
+
results[task][stderr]
|
510 |
+
for task in task_list
|
511 |
+
if stderr in results[task]
|
512 |
+
]
|
513 |
+
sizes = [
|
514 |
+
results[task]["samples"]
|
515 |
+
for task in task_list
|
516 |
+
if metric in results[task]
|
517 |
+
]
|
518 |
+
|
519 |
+
# compute group's pooled metric and stderr
|
520 |
+
results[group][
|
521 |
+
metric
|
522 |
+
] = lm_eval.api.metrics.aggregate_subtask_metrics(metrics, sizes)
|
523 |
+
# TODO: calculate grouped metric using aggregation fn
|
524 |
+
if "N/A" in stderrs:
|
525 |
+
results[group][stderr] = "N/A"
|
526 |
+
else:
|
527 |
+
results[group][
|
528 |
+
stderr
|
529 |
+
] = lm_eval.api.metrics.pooled_sample_stderr(stderrs, sizes)
|
530 |
+
# TODO: allow GroupConfigs to choose which variance formula is used, for back-compatibility
|
531 |
+
# To use the old (likely incorrect) variance formula, comment out the above and uncomment this line:
|
532 |
+
# results[group][stderr] = lm_eval.api.metrics.combined_sample_stderr(stderrs, sizes, metrics=metrics)
|
533 |
+
|
534 |
+
results[group]["samples"] = sum(sizes)
|
535 |
+
|
536 |
+
results_agg = defaultdict(dict)
|
537 |
+
groups_agg = defaultdict(dict)
|
538 |
+
all_tasks_list = list(task_hierarchy.keys())
|
539 |
+
while True:
|
540 |
+
add_tasks_list = list(k for k in results_agg.keys())
|
541 |
+
left_tasks_list = sorted(list(set(all_tasks_list) - set(add_tasks_list)))
|
542 |
+
if len(left_tasks_list) == 0:
|
543 |
+
break
|
544 |
+
|
545 |
+
_task_hierarchy = {
|
546 |
+
k: v for k, v in task_hierarchy.items() if k in left_tasks_list
|
547 |
+
}
|
548 |
+
_results_agg, _groups_agg = prepare_print_tasks(_task_hierarchy, results)
|
549 |
+
|
550 |
+
results_agg = {**results_agg, **_results_agg}
|
551 |
+
groups_agg = {**groups_agg, **_groups_agg}
|
552 |
+
|
553 |
+
for group_name, task_list in task_hierarchy.items():
|
554 |
+
if task_list:
|
555 |
+
num_fewshot[group_name] = num_fewshot[
|
556 |
+
task_list[0]
|
557 |
+
] # TODO: validate this
|
558 |
+
|
559 |
+
results_dict = {
|
560 |
+
"results": dict(results_agg.items()),
|
561 |
+
**({"groups": dict(groups_agg.items())} if bool(groups_agg) else {}),
|
562 |
+
"group_subtasks": dict(reversed(task_hierarchy.items())),
|
563 |
+
"configs": dict(sorted(configs.items())),
|
564 |
+
"versions": dict(sorted(versions.items())),
|
565 |
+
"n-shot": dict(sorted(num_fewshot.items())),
|
566 |
+
}
|
567 |
+
if log_samples:
|
568 |
+
results_dict["samples"] = dict(samples)
|
569 |
+
|
570 |
+
return results_dict
|
571 |
+
|
572 |
+
else:
|
573 |
+
return None
|
574 |
+
|
575 |
+
|
576 |
+
def request_caching_arg_to_dict(cache_requests: str) -> dict:
|
577 |
+
request_caching_args = {
|
578 |
+
"cache_requests": cache_requests in {"true", "refresh"},
|
579 |
+
"rewrite_requests_cache": cache_requests == "refresh",
|
580 |
+
"delete_requests_cache": cache_requests == "delete",
|
581 |
+
}
|
582 |
+
|
583 |
+
return request_caching_args
|
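For orientation only (not part of the diff above): the string accepted by `request_caching_arg_to_dict` maps onto the three boolean flags exactly as the membership and equality checks in the function suggest; a minimal sketch:

# Illustration: the three accepted cache_requests values and the flags they produce.
for value in ("true", "refresh", "delete"):
    print(value, request_caching_arg_to_dict(cache_requests=value))
# "true"    -> {"cache_requests": True,  "rewrite_requests_cache": False, "delete_requests_cache": False}
# "refresh" -> {"cache_requests": True,  "rewrite_requests_cache": True,  "delete_requests_cache": False}
# "delete"  -> {"cache_requests": False, "rewrite_requests_cache": False, "delete_requests_cache": True}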
lm-evaluation/lm_eval/evaluator_utils.py
ADDED
@@ -0,0 +1,312 @@
import collections
import math
import pathlib
import sys
from typing import Dict, List, Optional, Tuple, Union

from lm_eval.api import metrics
from lm_eval.utils import eval_logger, positional_deprecated


class TaskOutput:
    """
    Wrapper class for Task outputs. It contains various attributes and methods to manage and calculate metrics for the task.

    Attributes:
        task (object): The task object.
        task_name (str): The name of the task.
        task_config (dict): The configuration of the task.
        version (str): The version of the task.
        group_name (str): The name of the task group.
        n_shot (int): The number of shots for the task.
        task_alias (str): The alias of the task.
        group_alias (str): The alias of the task group.
        is_group (bool): Indicates if the task is a group.
        logged_samples (list): The list of logged samples.
        sample_len (int): The length of the samples.
        sample_metrics (defaultdict): The dictionary of samples' metrics.
        agg_metrics (defaultdict): The dictionary of aggregate metrics.

    Methods:
        from_taskdict(cls, task_name: str, task):
            Creates a TaskOutput instance from a task dictionary.

        calculate_aggregate_metric(bootstrap_iters=100000) -> None:
            Calculates the aggregate metrics for the task.
    """

    def __init__(
        self,
        task=None,
        task_name=None,
        task_config=None,
        version=None,
        group_name=None,
        n_shot=None,
        task_alias=None,
        group_alias=None,
        is_group=None,
    ):
        self.task = task
        self.task_config = task_config
        self.task_name = task_name
        self.group_name = group_name
        self.version = version
        self.n_shot = n_shot
        self.task_alias = task_alias
        self.group_alias = group_alias
        self.is_group = is_group
        self.logged_samples = []
        self.sample_len = None
        self.sample_metrics = collections.defaultdict(list)
        self.agg_metrics = collections.defaultdict(list)

    @classmethod
    def from_taskdict(cls, task_name: str, task):
        if isinstance(task, tuple):
            group_name, task = task
        else:
            group_name = None
        if not task:
            # these get filtered out in get_task_list
            # once they are added to the group hierarchy
            is_group = True
            return cls(
                task=task, task_name=task_name, is_group=is_group, group_name=group_name
            )
        version = task.VERSION
        task_config = dict(task.dump_config())
        if (n_shot := task_config.get("num_fewshot")) == 0:
            n_shot = task_config.get("metadata", {}).get("num_fewshot", 0)
        task_alias = task_config.get("alias")
        group_alias = task_config.get("group_alias")
        return cls(
            task=task,
            task_name=task_name,
            task_config=task_config,
            group_name=group_name,
            version=version,
            n_shot=n_shot,
            task_alias=task_alias,
            group_alias=group_alias,
        )

    def calculate_aggregate_metric(self, bootstrap_iters=100000) -> None:
        for (metric, filter_key), items in self.sample_metrics.items():
            agg_fn = self.task.aggregation()[metric]
            metric_key = f"{metric},{filter_key}"
            self.agg_metrics[metric_key] = agg_fn(items)
            self.sample_len = len(items)  # TODO: same sample size for each metric?
            if bootstrap_iters:
                stderr_fn = metrics.stderr_for_metric(
                    metric=agg_fn,
                    bootstrap_iters=min(bootstrap_iters, 100)
                    if metric in ["bleu", "chrf", "ter"]
                    else bootstrap_iters,
                )
                self.agg_metrics[f"{metric}_stderr,{filter_key}"] = (
                    stderr_fn(items) if (stderr_fn and len(items) > 1) else "N/A"
                )

    def __repr__(self):
        return (
            f"TaskOutput(task_name={self.task_name}, "
            f"group_name={self.group_name}, "
            f"version={self.version},"
            f"n_shot={self.n_shot}"
            f"task_alias={self.task_alias}, group_alias={self.group_alias})"
        )


def get_task_list(task_dict: dict) -> Tuple[Dict[str, list], List[TaskOutput]]:
    task_hierarchy = collections.defaultdict(list)
    outputs = list(TaskOutput.from_taskdict(x, y) for x, y in task_dict.items())
    for task_output in outputs:
        if group_name := task_output.group_name:
            task_hierarchy[group_name].append(task_output.task_name)
        else:
            task_hierarchy[task_output.task_name] = []
    # returns task_hierarchy tracking which groups contain which subtasks,
    # and a list of TaskOutput classes for each non-group subtask
    return task_hierarchy, [x for x in outputs if x.task]


def print_writeout(task) -> None:
    for inst in task.instances:
        # print the prompt for the first few documents
        if inst.doc_id < 1:
            eval_logger.info(
                f"Task: {task}; document {inst.doc_id}; context prompt (starting on next line):\
\n{inst.args[0]}\n(end of prompt on previous line)\ntarget string or answer choice index (starting on next line):\n{task.doc_to_target(inst.doc)}\n(end of target on previous line)"
            )
            eval_logger.info(f"Request: {str(inst)}")


def get_sample_size(task, limit: Optional[int]) -> Union[int, None]:
    if limit is not None:
        limit = (
            int(math.ceil(len(task.eval_docs) * limit)) if limit < 1.0 else int(limit)
        )
    return limit


def prepare_print_tasks(
    task_hierarchy: dict, results: dict, tab=0
) -> Tuple[dict, dict]:
    """
    @param task_hierarchy: Dictionary representing the group hierarchy of tasks. Each key is a group name and its
        value is a list of task names.
    @param results: Dictionary containing the results of each task. Each key is a
        group name and its value is a dictionary of task results.
    @param tab: The indentation level for printing the task
        hierarchy. Default is 0.
    @return: A tuple of two dictionaries: results_agg and groups_agg. results_agg contains
        aggregated results for each task, and groups_agg contains aggregated results for each group.

    Prepares the task hierarchy and aggregates the results for each task and group recursively for printing.
    """
    results_agg = collections.defaultdict(dict)
    groups_agg = collections.defaultdict(dict)

    (group_name, task_list), *_ = task_hierarchy.items()
    task_list = sorted(task_list)

    results_agg[group_name] = results[group_name].copy()
    # results_agg[group_name]["tab"] = tab
    if "samples" in results_agg[group_name]:
        results_agg[group_name].pop("samples")

    tab_string = " " * tab + "- " if tab > 0 else ""

    if "alias" in results_agg[group_name]:
        results_agg[group_name]["alias"] = tab_string + results_agg[group_name]["alias"]
    else:
        results_agg[group_name]["alias"] = tab_string + group_name

    if len(task_list) > 0:
        groups_agg[group_name] = results[group_name].copy()
        # groups_agg[group_name]["tab"] = tab
        if "samples" in groups_agg[group_name]:
            groups_agg[group_name].pop("samples")

        if "alias" in groups_agg[group_name]:
            groups_agg[group_name]["alias"] = (
                tab_string + groups_agg[group_name]["alias"]
            )
        else:
            groups_agg[group_name]["alias"] = tab_string + group_name

        for task_name in task_list:
            if task_name in task_hierarchy:
                _task_hierarchy = {
                    **{task_name: task_hierarchy[task_name]},
                    **task_hierarchy,
                }
            else:
                _task_hierarchy = {
                    **{task_name: []},
                    **task_hierarchy,
                }

            _results_agg, _groups_agg = prepare_print_tasks(
                _task_hierarchy, results, tab + 1
            )
            results_agg = {**results_agg, **_results_agg}
            groups_agg = {**groups_agg, **_groups_agg}

    return results_agg, groups_agg


def consolidate_results(
    eval_tasks: List[TaskOutput],
) -> Tuple[dict, dict, dict, dict, dict]:
    """
    @param eval_tasks: list(TaskOutput).
    @return: A tuple containing the consolidated results, samples, configs, versions, and num_fewshot.

    Consolidates the results of multiple evaluation tasks into a single structure.

    The method iterates over each evaluation instance and extracts relevant information to create the consolidated
    results structure. The consolidated results structure has the following properties:

    - results: A defaultdict with task names as keys and dictionaries as values. Each dictionary contains
      metric/filter pairs as keys and corresponding metric values as values. The "alias" key is used to store task
      aliases specified in the task configuration.
    - samples: A defaultdict with task names as keys and lists of log samples as values.
    - configs: A defaultdict with task names as keys and task configurations as values.
    - versions: A defaultdict with task names as keys and task versions as values.
    - num_fewshot: A defaultdict with task names as keys and number of few-shot samples as values.

    The method then returns the consolidated results, samples, configs, versions, and num_fewshot as a tuple.
    """
    # stores the final result for each task, for each metric/filter pair.
    results = collections.defaultdict(dict)
    # logs info about each document evaluated.
    samples = collections.defaultdict(list)
    # store num-fewshot value per task
    num_fewshot = collections.defaultdict(int)
    # Tracks the YAML configs of all chosen tasks
    configs = collections.defaultdict(dict)
    # Tracks each task's version.
    versions = collections.defaultdict(dict)
    for task_output in eval_tasks:
        if "task_alias" in (task_config := task_output.task_config):
            results[task_output.task_name]["alias"] = task_config["task_alias"]
        if group_alias := task_output.group_alias:
            if group_alias not in results and (group_name := task_output.group_name):
                results[group_name]["alias"] = group_alias
        num_fewshot[task_output.task_name] = task_output.n_shot
        configs[task_output.task_name] = task_output.task_config
        versions[task_output.task_name] = task_output.version
        samples[task_output.task_name] = task_output.logged_samples
        for (metric, filter_key), items in task_output.sample_metrics.items():
            metric_key = f"{metric},{filter_key}"
            results[task_output.task_name][metric_key] = task_output.agg_metrics[
                metric_key
            ]
            results[task_output.task_name]["samples"] = task_output.sample_len
            results[task_output.task_name][
                f"{metric}_stderr,{filter_key}"
            ] = task_output.agg_metrics[f"{metric}_stderr,{filter_key}"]
    return results, samples, configs, versions, num_fewshot


@positional_deprecated
def find_test_root(start_path: pathlib.Path) -> pathlib.Path:
    """
    Search upward in the directory tree to a maximum of three layers
    to find and return the package root (containing the 'tests' folder)
    """
    cur_path = start_path.resolve()
    max_layers = 3
    for _ in range(max_layers):
        if (cur_path / "tests" / "test_version_stable.py").exists():
            return cur_path
        else:
            cur_path = cur_path.parent.resolve()
    raise FileNotFoundError(
        f"Unable to find package root within {max_layers} upwards" + f"of {start_path}"
    )


@positional_deprecated
def run_task_tests(task_list: List[str]):
    """
    Find the package root and run the tests for the given tasks
    """
    import pytest

    package_root = find_test_root(start_path=pathlib.Path(__file__))
    task_string = " or ".join(task_list)
    args = [
        f"{package_root}/tests/test_version_stable.py",
        f"--rootdir={package_root}",
        "-k",
        f"{task_string}",
    ]
    sys.path.append(str(package_root))
    pytest_return_val = pytest.main(args)
    if pytest_return_val:
        raise ValueError(
            f"Not all tests for the specified tasks ({task_list}) ran successfully! Error code: {pytest_return_val}"
        )
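A quick illustration (not part of the diff) of how `get_sample_size` above interprets `limit`: values below 1.0 are read as a fraction of the task's eval documents and rounded up, anything else is an absolute count. The `SimpleNamespace` stand-in is only for this sketch; real callers pass a Task object.

import math
from types import SimpleNamespace

# Hypothetical stand-in exposing only the attribute get_sample_size reads.
toy_task = SimpleNamespace(eval_docs=list(range(250)))

assert get_sample_size(toy_task, None) is None                  # no limit requested
assert get_sample_size(toy_task, 0.1) == math.ceil(250 * 0.1)   # fractional limit -> 25 docs
assert get_sample_size(toy_task, 40) == 40                      # absolute count passes through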
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_anatomy.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "anatomy"
"description": "以下是关于解剖学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_anatomy"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_astronomy.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "astronomy"
"description": "以下是关于天文学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_astronomy"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_law.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_law"
"description": "以下是关于大学法律的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_college_law"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_college_medical_statistics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "college_medical_statistics"
"description": "以下是关于大学医学统计的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_college_medical_statistics"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_conceptual_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "conceptual_physics"
"description": "以下是关于概念物理学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_conceptual_physics"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_education.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "education"
"description": "以下是关于教育学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_education"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_ethnology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "ethnology"
"description": "以下是关于民族学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_ethnology"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_global_facts.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "global_facts"
"description": "以下是关于全球事实的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_global_facts"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_biology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_biology"
"description": "以下是关于高中生物的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_high_school_biology"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_high_school_physics.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "high_school_physics"
"description": "以下是关于高中物理学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_high_school_physics"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_journalism.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "journalism"
"description": "以下是关于新闻学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_journalism"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_logical.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "logical"
"description": "以下是关于逻辑学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_logical"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_machine_learning.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "machine_learning"
"description": "以下是关于机器学习的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_machine_learning"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_security_study.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "security_study"
"description": "以下是关于安全研究的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_security_study"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_sociology.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "sociology"
"description": "以下是关于社会学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_sociology"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_sports_science.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "sports_science"
"description": "以下是关于体育学的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_sports_science"
lm-evaluation/lm_eval/tasks/cmmlu/cmmlu_default_world_religions.yaml
ADDED
@@ -0,0 +1,4 @@
"dataset_name": "world_religions"
"description": "以下是关于世界宗教的单项选择题,请直接给出正确答案的选项。\n\n"
"include": "_default_template_yaml"
"task": "cmmlu_world_religions"
lm-evaluation/lm_eval/tasks/crows_pairs/README.md
ADDED
@@ -0,0 +1,101 @@
# CrowS-Pairs

### Paper

CrowS-Pairs: A Challenge Dataset for Measuring Social Biases in Masked Language Models
https://aclanthology.org/2020.emnlp-main.154/
French CrowS-Pairs: Extending a challenge dataset for measuring social bias in masked
language models to a language other than English
https://aclanthology.org/2022.acl-long.583/

CrowS-Pairs is a challenge set for evaluating language models (LMs) on their tendency
to generate biased outputs. CrowS-Pairs comes in 2 languages, and the English subset has
a newer version which fixes some of the issues with the original version.

Homepage: https://github.com/nyu-mll/crows-pairs, https://gitlab.inria.fr/french-crows-pairs

### Citation

```bibtex
@inproceedings{nangia-etal-2020-crows,
    title = "{C}row{S}-Pairs: A Challenge Dataset for Measuring Social Biases in Masked Language Models",
    author = "Nangia, Nikita and
      Vania, Clara and
      Bhalerao, Rasika and
      Bowman, Samuel R.",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2020.emnlp-main.154",
    doi = "10.18653/v1/2020.emnlp-main.154",
    pages = "1953--1967",
    abstract = "Pretrained language models, especially masked language models (MLMs) have seen success across many NLP tasks. However, there is ample evidence that they use the cultural biases that are undoubtedly present in the corpora they are trained on, implicitly creating harm with biased representations. To measure some forms of social bias in language models against protected demographic groups in the US, we introduce the Crowdsourced Stereotype Pairs benchmark (CrowS-Pairs). CrowS-Pairs has 1508 examples that cover stereotypes dealing with nine types of bias, like race, religion, and age. In CrowS-Pairs a model is presented with two sentences: one that is more stereotyping and another that is less stereotyping. The data focuses on stereotypes about historically disadvantaged groups and contrasts them with advantaged groups. We find that all three of the widely-used MLMs we evaluate substantially favor sentences that express stereotypes in every category in CrowS-Pairs. As work on building less biased models advances, this dataset can be used as a benchmark to evaluate progress.",
}

@inproceedings{neveol-etal-2022-french,
    title = "{F}rench {C}row{S}-Pairs: Extending a challenge dataset for measuring social bias in masked language models to a language other than {E}nglish",
    author = {N{\'e}v{\'e}ol, Aur{\'e}lie and
      Dupont, Yoann and
      Bezan{\c{c}}on, Julien and
      Fort, Kar{\"e}n},
    booktitle = "Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = may,
    year = "2022",
    address = "Dublin, Ireland",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2022.acl-long.583",
    doi = "10.18653/v1/2022.acl-long.583",
    pages = "8521--8531",
    abstract = "Warning: This paper contains explicit statements of offensive stereotypes which may be upsetting. Much work on biases in natural language processing has addressed biases linked to the social and cultural experience of English speaking individuals in the United States. We seek to widen the scope of bias studies by creating material to measure social bias in language models (LMs) against specific demographic groups in France. We build on the US-centered CrowS-pairs dataset to create a multilingual stereotypes dataset that allows for comparability across languages while also characterizing biases that are specific to each country and language. We introduce 1,679 sentence pairs in French that cover stereotypes in ten types of bias like gender and age. 1,467 sentence pairs are translated from CrowS-pairs and 212 are newly crowdsourced. The sentence pairs contrast stereotypes concerning underadvantaged groups with the same sentence concerning advantaged groups. We find that four widely used language models (three French, one multilingual) favor sentences that express stereotypes in most bias categories. We report on the translation process from English into French, which led to a characterization of stereotypes in CrowS-pairs including the identification of US-centric cultural traits. We offer guidelines to further extend the dataset to other languages and cultural environments.",
}
```

### Groups and Tasks

#### Groups

- `crows_pairs_english`: The entire English subset of the CrowS-Pairs dataset.
- `crows_pairs_french`: The entire French subset of the CrowS-Pairs dataset.

#### Tasks

The following tasks evaluate sub-areas of bias in the English CrowS-Pairs dataset:
- `crows_pairs_english_age`
- `crows_pairs_english_autre`
- `crows_pairs_english_disability`
- `crows_pairs_english_gender`
- `crows_pairs_english_nationality`
- `crows_pairs_english_physical_appearance`
- `crows_pairs_english_race_color`
- `crows_pairs_english_religion`
- `crows_pairs_english_sexual_orientation`
- `crows_pairs_english_socioeconomic`

The following tasks evaluate sub-areas of bias in the French CrowS-Pairs dataset:
- `crows_pairs_french_age`
- `crows_pairs_french_autre`
- `crows_pairs_french_disability`
- `crows_pairs_french_gender`
- `crows_pairs_french_nationality`
- `crows_pairs_french_physical_appearance`
- `crows_pairs_french_race_color`
- `crows_pairs_french_religion`
- `crows_pairs_french_sexual_orientation`
- `crows_pairs_french_socioeconomic`

All tasks evaluate the percentage of more-stereotypical sentences that are rated as more likely by a model than the non-stereotypical sentences (`pct_stereotype`), as well as the average absolute difference of loglikelihoods between the sentences in the pairs.

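As a rough, illustrative sketch of that per-pair scoring (assuming one loglikelihood per sentence in the pair, as produced by the harness's loglikelihood requests):

```python
def score_pair(ll_more_stereotypical: float, ll_less_stereotypical: float) -> dict:
    # A pair counts toward pct_stereotype when the model assigns the
    # more-stereotypical sentence the higher loglikelihood.
    return {
        "likelihood_diff": abs(ll_more_stereotypical - ll_less_stereotypical),
        "pct_stereotype": 1.0 if ll_more_stereotypical > ll_less_stereotypical else 0.0,
    }
```
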
### Checklist

* [x] Is the task an existing benchmark in the literature?
  * [x] Have you referenced the original paper that introduced the task?
  * [x] If yes, does the original paper provide a reference implementation?
    * [x] The original paper does not for causal language models, so this is a novel formulation of the task for autoregressive LMs.

If other tasks on this dataset are already supported:
* [x] Is the "Main" variant of this task clearly denoted?
* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates?
* [x] Have you noted which, if any, published evaluation setups are matched by this variant?
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_age.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_english_age
dataset_name: english
process_docs: !function utils.filter_age
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_disability.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_english_disability
dataset_name: english
process_docs: !function utils.filter_disability
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_gender.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_english_gender
dataset_name: english
process_docs: !function utils.filter_gender
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_nationality.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_english_nationality
dataset_name: english
process_docs: !function utils.filter_nationality
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_religion.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_english_religion
dataset_name: english
process_docs: !function utils.filter_religion
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_english_sexual_orientation.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_english_sexual_orientation
dataset_name: english
process_docs: !function utils.filter_orientation
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french.yaml
ADDED
@@ -0,0 +1,3 @@
include: crows_pairs_english.yaml
task: crows_pairs_french
dataset_name: french
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_age.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_age
dataset_name: french
process_docs: !function utils.filter_age
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_disability.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_disability
dataset_name: french
process_docs: !function utils.filter_disability
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_gender.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_gender
dataset_name: french
process_docs: !function utils.filter_gender
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_nationality.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_nationality
dataset_name: french
process_docs: !function utils.filter_nationality
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_physical_appearance.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_physical_appearance
dataset_name: french
process_docs: !function utils.filter_appearance
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_race_color.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_race_color
dataset_name: french
process_docs: !function utils.filter_race_color
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_religion.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_religion
dataset_name: french
process_docs: !function utils.filter_religion
lm-evaluation/lm_eval/tasks/crows_pairs/crows_pairs_french_socioeconomic.yaml
ADDED
@@ -0,0 +1,4 @@
include: crows_pairs_english.yaml
task: crows_pairs_french_socioeconomic
dataset_name: french
process_docs: !function utils.filter_socio
lm-evaluation/lm_eval/tasks/crows_pairs/utils.py
ADDED
@@ -0,0 +1,64 @@
import datasets


def process_results(doc, results):
    lls, _ = zip(*results)

    likelihood1, likelihood2 = lls

    # Calculate the absolute difference in loglikelihoods
    diff = abs(likelihood1 - likelihood2)

    # if stereotypical sentence more likely (loglikelihood higher)
    # then treat this as predicting stereotyped sentence
    acc = 1.0 if likelihood1 > likelihood2 else 0.0

    return {"likelihood_diff": diff, "pct_stereotype": acc}


def doc_to_choice(doc):
    return [doc["sent_more"], doc["sent_less"]]


def filter_dataset(dataset: datasets.Dataset, bias_type: str) -> datasets.Dataset:
    return dataset.filter(lambda example: example["bias_type"].startswith(bias_type))


def filter_race_color(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "race-color")


def filter_socio(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "socioeconomic")


def filter_gender(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "gender")


def filter_age(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "age")


def filter_religion(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "religion")


def filter_disability(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "disability")


def filter_orientation(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "sexual-orientation")


def filter_nationality(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "nationality")


def filter_appearance(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "physical-appearance")


def filter_autre(dataset: datasets.Dataset) -> datasets.Dataset:
    return filter_dataset(dataset, "autre")
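For illustration only (not part of the diff): each `filter_*` helper above simply narrows the dataset to rows whose `bias_type` starts with the corresponding prefix, which is what the `process_docs: !function utils.filter_*` entries in the YAML configs rely on. A minimal sketch with a tiny in-memory dataset:

import datasets

# Tiny in-memory example; the real tasks load the crows_pairs splits from the Hub.
toy = datasets.Dataset.from_dict(
    {
        "sent_more": ["A", "B", "C"],
        "sent_less": ["a", "b", "c"],
        "bias_type": ["age", "race-color", "gender"],
    }
)

assert filter_age(toy)["bias_type"] == ["age"]
assert filter_race_color(toy)["bias_type"] == ["race-color"]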
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_as.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: as
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-as
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_hi.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: hi
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-hi
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_kn.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: kn
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-kn
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_ml.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: ml
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-ml
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_or.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: or
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-or
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_pa.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: pa
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-pa
lm-evaluation/lm_eval/tasks/indicwikibio/indicwikibio_ta.yaml
ADDED
@@ -0,0 +1,25 @@
# This file will be included in the generated language-specific task configs.
# It doesn't have a yaml file extension as it is not meant to be imported directly
# by the harness.
dataset_path: ai4bharat/IndicWikiBio
dataset_name: ta
validation_split: validation
test_split: test
output_type: generate_until
doc_to_text: "infobox: {{infobox}}
  Summary is :"
doc_to_target: summary
metric_list:
  - metric: bleu
  - metric: ter
  - metric: chrf
generation_kwargs:
  until:
    - "\n"
  do_sample: false
  temperature: 0.0
repeats: 1
metadata:
  version: 1.0

task: indicwikibio-ta
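Finally, a hedged usage sketch (not part of the diff) of running one of the task configs added above through the harness's Python entry point. The `gpt2` checkpoint is only a placeholder, and the exact `simple_evaluate` keyword set may vary slightly between harness versions:

import lm_eval

# Smoke-test the Assamese IndicWikiBio task defined in indicwikibio_as.yaml.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",  # placeholder model
    tasks=["indicwikibio-as"],
    limit=8,  # evaluate only a handful of documents
)
print(results["results"]["indicwikibio-as"])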